[PATCH i-g-t v21 36/36] Cleanup

Zbigniew Kempczyński zbigniew.kempczynski at intel.com
Fri Feb 26 11:11:53 UTC 2021


---
 lib/igt_map.c                    |   5 +-
 lib/intel_allocator.c            | 627 +++++++++++++++++++++++--------
 lib/intel_allocator.h            |   8 +-
 lib/intel_allocator_msgchannel.h |   9 +-
 lib/intel_allocator_random.c     |   9 +-
 lib/intel_allocator_simple.c     |  28 +-
 lib/intel_batchbuffer.c          |  81 +++-
 lib/intel_batchbuffer.h          |  24 +-
 tests/i915/api_intel_allocator.c | 215 ++++++-----
 tests/i915/api_intel_bb.c        |  91 +++++
 10 files changed, 796 insertions(+), 301 deletions(-)
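
For reference, here is a minimal usage sketch (not part of the patch) of the
new handle-based, VM-aware allocator API added in lib/intel_allocator.h and
exercised by the open-vm subtest below. The vm ids are plain lookup keys for
the allocator, exactly as the test uses them; everything else here is
illustrative:

    /* Sketch only: assumes an open i915 fd; error handling omitted. */
    #include "igt.h"
    #include "intel_allocator.h"

    static void vm_allocator_example(int i915)
    {
            /* Allocators are now keyed by <fd, vm> (or <fd, ctx>) and
             * referenced through opaque 64-bit handles instead of raw
             * struct intel_allocator pointers. */
            uint64_t ahnd = intel_allocator_open_vm(i915, 1, INTEL_ALLOCATOR_SIMPLE);
            /* Attach the same underlying allocator to another vm id. */
            uint64_t ahnd2 = intel_allocator_open_vm_as(ahnd, 2);
            uint64_t offset;

            offset = intel_allocator_alloc(ahnd, 1 /* gem handle */, 0x1000, 0x1000);
            igt_assert(intel_allocator_is_allocated(ahnd, 1, 0x1000, offset));
            igt_assert(intel_allocator_free(ahnd, 1));

            /* close() returns true only when the last reference is dropped
             * and the allocator is empty. */
            intel_allocator_close(ahnd2);
            igt_assert(intel_allocator_close(ahnd));
    }

intel_bb users can switch an existing batchbuffer between such allocators and
vms with the new intel_bb_assign_vm(), as exercised by the bb-with-vm subtest
in tests/i915/api_intel_bb.c.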

diff --git a/lib/igt_map.c b/lib/igt_map.c
index ca58efbd9..7868e2e3c 100644
--- a/lib/igt_map.c
+++ b/lib/igt_map.c
@@ -56,7 +56,7 @@ static inline uint64_t hash_64_4bytes(const void *val, unsigned int bits)
 }
 
 void __igt_map_init(struct igt_map *map, igt_map_equal_fn eq_fn,
-		 igt_map_hash_fn hash_fn, uint32_t initial_bits)
+		    igt_map_hash_fn hash_fn, uint32_t initial_bits)
 {
 	map->equal_fn = eq_fn == NULL ? equal_4bytes : eq_fn;
 	map->hash_fn = hash_fn == NULL ? hash_64_4bytes : hash_fn;
@@ -104,9 +104,10 @@ void *igt_map_find(struct igt_map *map, const void *key)
 {
 	struct igt_map_entry *pos = NULL;
 
-	igt_map_for_each_possible(map, pos, key)
+	igt_map_for_each_possible(map, pos, key) {
 		if (map->equal_fn(pos->key, key))
 			break;
+	}
 
 	return pos ? pos->value : NULL;
 }
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 1295f5183..bce118208 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -24,6 +24,7 @@
 static const char *reqtype_str[] = {
 	[REQ_STOP]		= "stop",
 	[REQ_OPEN]		= "open",
+	[REQ_OPEN_AS]		= "open as",
 	[REQ_CLOSE]		= "close",
 	[REQ_ADDRESS_RANGE]	= "address range",
 	[REQ_ALLOC]		= "alloc",
@@ -44,15 +45,64 @@ static inline const char *reqstr(enum reqtype request_type)
 #define alloc_debug(...) {}
 #endif
 
-struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx);
-struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx);
+struct allocator {
+	int fd;
+	uint32_t ctx;
+	uint32_t vm;
+	struct intel_allocator *ial;
+	uint64_t handle;
+};
+
+struct handle_entry {
+	uint64_t handle;
+	struct allocator *al;
+};
+
+struct intel_allocator *intel_allocator_random_create(int fd);
+struct intel_allocator *intel_allocator_simple_create(int fd);
 struct intel_allocator *
-intel_allocator_simple_create_full(int fd, uint32_t ctx,
-				   uint64_t start, uint64_t end,
+intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
 				   enum allocator_strategy strategy);
 
-static struct igt_map *allocators_map;
+/*
+ * Instead of trying to find the first empty handle just get a new one.
+ * Assuming our counter is incremented 2^32 times per second (4GHz clock
+ * and handle assignment takes a single clock) a 64-bit counter would wrap
+ * around after ~68 years.
+ *
+ *                   allocator
+ * handles           <fd, ctx>           intel allocator
+ * +-----+           +--------+          +-------------+
+ * |  1  +---------->+  fd: 3 +----+---->+ data: ...   |
+ * +-----+           | ctx: 1 |    |     | refcount: 2 |
+ * |  2  +------     +--------+    |     +-------------+
+ * +-----+     +---->+  fd: 3 +----+
+ * |  3  +--+        | ctx: 1 |          intel allocator
+ * +-----+  |        +--------+          +-------------+
+ * | ... |  +------->+  fd: 3 +--------->+ data: ...   |
+ * +-----+           | ctx: 2 |          | refcount: 1 |
+ * |  n  +--------+  +--------+          +-------------+
+ * +-----+        |
+ * | ... +-----+  |  allocator
+ * +-----+     |  |  <fd, vm>            intel allocator
+ * | ... +--+  |  |  +--------+          +-------------+
+ * +     +  |  |  +->+  fd: 3 +--+--+--->+ data: ...   |
+ *          |  |     |  vm: 1 |  |  |    | refcount: 3 |
+ *          |  |     +--------+  |  |    +-------------+
+ *          |  +---->+  fd: 3 +--+  |
+ *          |        |  vm: 1 |     |
+ *          |        +--------+     |
+ *          +------->+  fd: 3 +-----+
+ *                   |  vm: 2 |
+ *                   +--------+
+ */
+static _Atomic(uint64_t) next_handle;
+static struct igt_map *handles;
+static struct igt_map *ctx_map;
+static struct igt_map *vm_map;
 static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
+#define GET_MAP(vm) ((vm) ? vm_map : ctx_map)
+
 static bool multiprocess;
 static pthread_t allocator_thread;
 static bool allocator_thread_running;
@@ -101,7 +151,103 @@ static int recv_resp(struct msg_channel *msgchan,
 	return msgchan->recv_resp(msgchan, response);
 }
 
-static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
+static uint64_t __handle_create(struct allocator *al)
+{
+	struct handle_entry *h = malloc(sizeof(*h));
+
+	igt_assert(h);
+	h->handle = atomic_fetch_add(&next_handle, 1);
+	h->al = al;
+	igt_map_add(handles, h, h);
+
+	return h->handle;
+}
+
+static void __handle_destroy(uint64_t handle)
+{
+	struct handle_entry *h, he = { .handle = handle };
+
+	h = igt_map_find(handles, &he);
+	igt_assert(h);
+	igt_map_del(handles, &he);
+	free(h);
+}
+
+static struct allocator *__allocator_find(int fd, uint32_t ctx, uint32_t vm)
+{
+	struct allocator al = { .fd = fd, .ctx = ctx, .vm = vm };
+	struct igt_map *map = GET_MAP(vm);
+
+	return igt_map_find(map, &al);
+}
+
+static struct allocator *__allocator_find_by_handle(uint64_t handle)
+{
+	struct handle_entry *h, he = { .handle = handle };
+
+	h = igt_map_find(handles, &he);
+	if (!h)
+		return NULL;
+
+	return h->al;
+}
+
+static struct allocator *__allocator_create(int fd, uint32_t ctx, uint32_t vm,
+					    struct intel_allocator *ial)
+{
+	struct igt_map *map = GET_MAP(vm);
+	struct allocator *al = malloc(sizeof(*al));
+
+	igt_assert(al);
+	igt_assert(fd == ial->fd);
+	al->fd = fd;
+	al->ctx = ctx;
+	al->vm = vm;
+	al->ial = ial;
+
+	igt_map_add(map, al, al);
+
+	return al;
+}
+
+static void __allocator_destroy(struct allocator *al)
+{
+	struct igt_map *map = GET_MAP(al->vm);
+
+	igt_map_del(map, al);
+	free(al);
+}
+
+static int __allocator_get(struct allocator *al)
+{
+	struct intel_allocator *ial = al->ial;
+	int refcount;
+
+	refcount = atomic_fetch_add(&ial->refcount, 1);
+	igt_assert(refcount >= 0);
+
+	return refcount;
+}
+
+static bool __allocator_put(struct allocator *al)
+{
+	struct intel_allocator *ial = al->ial;
+	bool released = false;
+	int refcount;
+
+	refcount = atomic_fetch_sub(&ial->refcount, 1);
+	igt_assert(refcount >= 1);
+	if (refcount == 1) {
+		if (!ial->is_empty(ial) && warn_if_not_empty)
+			igt_warn("Allocator not clear before destroy!\n");
+
+		released = true;
+	}
+
+	return released;
+}
+
+static struct intel_allocator *intel_allocator_create(int fd,
 						      uint64_t start, uint64_t end,
 						      uint8_t allocator_type,
 						      uint8_t allocator_strategy)
@@ -123,14 +269,13 @@ static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
 			     "We cannot use NONE allocator\n");
 		break;
 	case INTEL_ALLOCATOR_RANDOM:
-		ial = intel_allocator_random_create(fd, ctx);
+		ial = intel_allocator_random_create(fd);
 		break;
 	case INTEL_ALLOCATOR_SIMPLE:
 		if (!start && !end)
-			ial = intel_allocator_simple_create(fd, ctx);
+			ial = intel_allocator_simple_create(fd);
 		else
-			ial = intel_allocator_simple_create_full(fd, ctx,
-								 start, end,
+			ial = intel_allocator_simple_create_full(fd, start, end,
 								 allocator_strategy);
 		break;
 	default:
@@ -139,107 +284,89 @@ static struct intel_allocator *intel_allocator_create(int fd, uint32_t ctx,
 		break;
 	}
 
+	igt_assert(ial);
+
 	ial->type = allocator_type;
 	ial->strategy = allocator_strategy;
-	atomic_fetch_add(&ial->refcount, 1);
 	pthread_mutex_init(&ial->mutex, NULL);
 
-	igt_map_add(allocators_map, ial, ial);
-
 	return ial;
 }
 
 static void intel_allocator_destroy(struct intel_allocator *ial)
 {
-	alloc_info("Destroying allocator (empty: %d)\n",
-		   ial->is_empty(ial));
+	alloc_info("Destroying allocator (empty: %d)\n", ial->is_empty(ial));
 
 	ial->destroy(ial);
 }
 
-static struct intel_allocator *__allocator_get(int fd, uint32_t ctx)
-{
-	struct intel_allocator *ial, ials = { .fd = fd, .ctx = ctx };
-	int refcount;
-
-	ial = igt_map_find(allocators_map, &ials);
-	if (!ial)
-		goto out_get;
-
-	refcount = atomic_fetch_add(&ial->refcount, 1);
-	igt_assert(refcount > 0);
-
-out_get:
-
-	return ial;
-}
-
-static bool __allocator_put(struct intel_allocator *ial)
+static struct allocator *allocator_open(int fd, uint32_t ctx, uint32_t vm,
+					uint64_t start, uint64_t end,
+					uint8_t allocator_type,
+					uint8_t allocator_strategy)
 {
-	struct intel_allocator ials = { .fd = ial->fd, .ctx = ial->ctx };
-	bool released = false;
-	int refcount;
-
-	ial = igt_map_find(allocators_map, &ials);
-	igt_assert(ial);
+	struct intel_allocator *ial;
+	struct allocator *al;
+	const char *idstr = vm ? "vm" : "ctx";
 
-	refcount = atomic_fetch_sub(&ial->refcount, 1);
-	alloc_debug("Refcount: %d\n", refcount);
-	igt_assert(refcount >= 1);
-	if (refcount == 1) {
-		igt_map_del(allocators_map, ial);
+	al = __allocator_find(fd, ctx, vm);
+	if (!al) {
+		alloc_info("Allocator fd: %d, ctx: %u, vm: %u, <0x%llx : 0x%llx> "
+			    "not found, creating one\n",
+			    fd, ctx, vm, (long long) start, (long long) end);
+		ial = intel_allocator_create(fd, start, end, allocator_type,
+					     allocator_strategy);
+		al = __allocator_create(fd, ctx, vm, ial);
+	} else {
+		al = __allocator_create(al->fd, al->ctx, al->vm, al->ial);
+		ial = al->ial;
+	}
+	__allocator_get(al);
+	al->handle = __handle_create(al);
 
-		if (!ial->is_empty(ial) && warn_if_not_empty)
-			igt_warn("Allocator not clear before destroy!\n");
+	if (ial->type != allocator_type) {
+		igt_warn("Allocator type must be same for fd/%s\n", idstr);
+		ial = NULL;
+	}
 
-		released = true;
+	if (ial->strategy != allocator_strategy) {
+		igt_warn("Allocator strategy must be same for fd/%s\n", idstr);
+		ial = NULL;
 	}
 
-	return released;
+	return al;
 }
 
-static struct intel_allocator *allocator_open(int fd, uint32_t ctx,
-					      uint64_t start, uint64_t end,
-					      uint8_t allocator_type,
-					      uint8_t allocator_strategy)
+static struct allocator *allocator_open_as(struct allocator *base,
+					   uint32_t new_vm)
 {
-	struct intel_allocator *ial;
-
-	pthread_mutex_lock(&map_mutex);
-	ial = __allocator_get(fd, ctx);
-	if (!ial) {
-		alloc_debug("Allocator fd: %d, ctx: %u, <0x%llx : 0x%llx> "
-			    "not found, creating one\n",
-			    fd, ctx, (long long) start, (long long) end);
-		ial = intel_allocator_create(fd, ctx, start, end,
-					     allocator_type, allocator_strategy);
-	}
-	pthread_mutex_unlock(&map_mutex);
+	struct allocator *al;
 
-	igt_assert_f(ial->type == allocator_type,
-		     "Allocator type must be same for fd/ctx\n");
-	igt_assert_f(ial->strategy == allocator_strategy,
-		     "Allocator strategy must be same or fd/ctx\n");
+	al = __allocator_create(base->fd, base->ctx, new_vm, base->ial);
+	__allocator_get(al);
+	al->handle = __handle_create(al);
 
-	return ial;
+	return al;
 }
 
-static bool allocator_close(uint64_t allocator_handle)
+static bool allocator_close(uint64_t ahnd)
 {
-	struct intel_allocator *ial = from_user_pointer(allocator_handle);
+	struct allocator *al;
 	bool released, is_empty = false;
 
-	igt_assert(ial);
-
-	pthread_mutex_lock(&map_mutex);
+	al = __allocator_find_by_handle(ahnd);
+	if (!al) {
+		igt_warn("Cannot find handle: %llx\n", (long long) ahnd);
+		return false;
+	}
 
-	released = __allocator_put(ial);
+	released = __allocator_put(al);
 	if (released) {
-		is_empty = ial->is_empty(ial);
-		intel_allocator_destroy(ial);
+		is_empty = al->ial->is_empty(al->ial);
+		intel_allocator_destroy(al->ial);
 	}
-
-	pthread_mutex_unlock(&map_mutex);
+	__allocator_destroy(al);
+	__handle_destroy(ahnd);
 
 	return is_empty;
 }
@@ -282,12 +409,28 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 
 	if (same_process) {
 		struct intel_allocator *ial;
+		struct allocator *al;
 		uint64_t start, end, size;
+		uint32_t ctx, vm;
 		bool allocated, reserved, unreserved;
+		/* Used when debug is on, so avoid compilation warnings */
+		(void) ctx;
+		(void) vm;
 
-		/* Mutex only work on allocator instance, not stop/open/close */
+		/*
+	 * The mutex only works on an allocator instance, not stop/open/close.
+		 */
 		if (req->request_type > REQ_CLOSE) {
-			ial = from_user_pointer(req->allocator_handle);
+			/*
+			 * We have to lock map mutex because concurrent open
+			 * can lead to resizing the map.
+			 */
+			pthread_mutex_lock(&map_mutex);
+			al = __allocator_find_by_handle(req->allocator_handle);
+			pthread_mutex_unlock(&map_mutex);
+			igt_assert(al);
+
+			ial = al->ial;
 			igt_assert(ial);
 
 			pthread_mutex_lock(&ial->mutex);
@@ -299,30 +442,92 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 			break;
 
 		case REQ_OPEN:
-			ial = allocator_open(req->open.fd, req->open.ctx,
-					     req->open.start, req->open.end,
-					     req->open.allocator_type,
-					     req->open.allocator_strategy);
-			igt_assert(ial);
+			pthread_mutex_lock(&map_mutex);
+			al = allocator_open(req->open.fd,
+					    req->open.ctx, req->open.vm,
+					    req->open.start, req->open.end,
+					    req->open.allocator_type,
+					    req->open.allocator_strategy);
+			ret = atomic_load(&al->ial->refcount);
+			pthread_mutex_unlock(&map_mutex);
 
 			resp->response_type = RESP_OPEN;
-			resp->open.allocator_handle = to_user_pointer(ial);
-			alloc_info("<open> [tid: %ld] fd: %d, ctx: %u, alloc_type: %u, "
-				   "ahnd: %p, refcnt: %d\n",
-				   (long) req->tid, req->open.fd, req->open.ctx,
-				   req->open.allocator_type, ial,
-				   atomic_load(&ial->refcount));
+			resp->open.allocator_handle = al->handle;
+			alloc_info("<open> [tid: %ld] fd: %d, ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u"
+				   ", alloc_type: %u, refcnt: %d->%d\n",
+				   (long) req->tid, req->open.fd, al->handle,
+				   req->open.ctx,
+				   req->open.vm, req->open.allocator_type,
+				   ret - 1, ret);
+			break;
+
+		case REQ_OPEN_AS:
+			/* lock first to avoid concurrent close */
+			pthread_mutex_lock(&map_mutex);
+
+			al = __allocator_find_by_handle(req->allocator_handle);
+			resp->response_type = RESP_OPEN_AS;
+
+			if (!al) {
+				alloc_info("<open as> [tid: %ld] ahnd: %" PRIx64
+					   " -> no handle\n",
+					   (long) req->tid, req->allocator_handle);
+				pthread_mutex_unlock(&map_mutex);
+				break;
+			}
+
+			if (!al->vm) {
+				alloc_info("<open as> [tid: %ld] ahnd: %" PRIx64
+					   " -> only open as for <fd, vm> is possible\n",
+					   (long) req->tid, req->allocator_handle);
+				pthread_mutex_unlock(&map_mutex);
+				break;
+			}
+
+
+			al = allocator_open_as(al, req->open_as.new_vm);
+			ret = atomic_load(&al->ial->refcount);
+			pthread_mutex_unlock(&map_mutex);
+
+			resp->response_type = RESP_OPEN_AS;
+			resp->open.allocator_handle = al->handle;
+			alloc_info("<open as> [tid: %ld] fd: %d, ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u"
+				   ", alloc_type: %u, refcnt: %d->%d\n",
+				   (long) req->tid, al->fd, al->handle,
+				   al->ctx, al->vm, al->ial->type,
+				   ret - 1, ret);
 			break;
 
 		case REQ_CLOSE:
-			ial = from_user_pointer(req->allocator_handle);
-			igt_assert(ial);
+			pthread_mutex_lock(&map_mutex);
+			al = __allocator_find_by_handle(req->allocator_handle);
+			resp->response_type = RESP_CLOSE;
+
+			if (!al) {
+				alloc_info("<close> [tid: %ld] ahnd: %" PRIx64
+					   " -> no handle\n",
+					   (long) req->tid, req->allocator_handle);
+				pthread_mutex_unlock(&map_mutex);
+				break;
+			}
 
 			resp->response_type = RESP_CLOSE;
-			ret = atomic_load(&ial->refcount);
+			ctx = al->ctx;
+			vm = al->vm;
+
+			ret = atomic_load(&al->ial->refcount);
 			resp->close.is_empty = allocator_close(req->allocator_handle);
-			alloc_info("<close> [tid: %ld] ahnd: %p, is_empty: %d, refcnt: %d\n",
-				   (long) req->tid, ial, resp->close.is_empty, ret);
+			pthread_mutex_unlock(&map_mutex);
+
+			alloc_info("<close> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u"
+				   ", is_empty: %d, refcnt: %d->%d\n",
+				   (long) req->tid, req->allocator_handle,
+				   ctx, vm, resp->close.is_empty,
+				   ret, al ? ret - 1 : 0);
+
 			break;
 
 		case REQ_ADDRESS_RANGE:
@@ -330,9 +535,11 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 			ial->get_address_range(ial, &start, &end);
 			resp->address_range.start = start;
 			resp->address_range.end = end;
-			alloc_info("<address range> [tid: %ld] "
-				   "start: 0x%" PRIx64 ", end: 0x%" PRId64 "\n",
-				   (long) req->tid, start, end);
+			alloc_info("<address range> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u"
+				   ", start: 0x%" PRIx64 ", end: 0x%" PRIx64 "\n",
+				   (long) req->tid, req->allocator_handle,
+				   al->ctx, al->vm, start, end);
 			break;
 
 		case REQ_ALLOC:
@@ -341,10 +548,12 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 							req->alloc.handle,
 							req->alloc.size,
 							req->alloc.alignment);
-			alloc_info("<alloc> [tid: %ld] handle: %u, "
-				   "size: 0x%" PRIx64 ", offset: 0x%" PRIx64
+			alloc_info("<alloc> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u, handle: %u"
+				   ", size: 0x%" PRIx64 ", offset: 0x%" PRIx64
 				   ", alignment: 0x%" PRIx64 "\n",
-				   (long) req->tid,
+				   (long) req->tid, req->allocator_handle,
+				   al->ctx, al->vm,
 				   req->alloc.handle, req->alloc.size,
 				   resp->alloc.offset, req->alloc.alignment);
 			break;
@@ -352,9 +561,12 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 		case REQ_FREE:
 			resp->response_type = RESP_FREE;
 			resp->free.freed = ial->free(ial, req->free.handle);
-			alloc_info("<free> [tid: %ld] handle: %u, freed: %d\n",
-				   (long) req->tid, req->free.handle,
-				   resp->free.freed);
+			alloc_info("<free> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u"
+				   ", handle: %u, freed: %d\n",
+				   (long) req->tid, req->allocator_handle,
+				   al->ctx, al->vm,
+				   req->free.handle, resp->free.freed);
 			break;
 
 		case REQ_IS_ALLOCATED:
@@ -364,8 +576,11 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 						      req->is_allocated.size,
 						      req->is_allocated.offset);
 			resp->is_allocated.allocated = allocated;
-			alloc_info("<is allocated> [tid: %ld] offset: 0x%" PRIx64
+			alloc_info("<is allocated> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u"
+				   ", offset: 0x%" PRIx64
 				   ", allocated: %d\n", (long) req->tid,
+				   req->allocator_handle, al->ctx, al->vm,
 				   req->is_allocated.offset, allocated);
 			break;
 
@@ -376,10 +591,12 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 						req->reserve.start,
 						req->reserve.end);
 			resp->reserve.reserved = reserved;
-			alloc_info("<reserve> [tid: %ld] handle: %u, "
-				   "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+			alloc_info("<reserve> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u, handle: %u"
+				   ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
 				   ", reserved: %d\n",
-				   (long) req->tid, req->reserve.handle,
+				   (long) req->tid, req->allocator_handle,
+				   al->ctx, al->vm, req->reserve.handle,
 				   req->reserve.start, req->reserve.end, reserved);
 			break;
 
@@ -390,10 +607,12 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 						    req->unreserve.start,
 						    req->unreserve.end);
 			resp->unreserve.unreserved = unreserved;
-			alloc_info("<unreserve> [tid: %ld] handle: %u, "
-				   "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+			alloc_info("<unreserve> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u, handle: %u"
+				   ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
 				   ", unreserved: %d\n",
-				   (long) req->tid, req->unreserve.handle,
+				   (long) req->tid, req->allocator_handle,
+				   al->ctx, al->vm, req->unreserve.handle,
 				   req->unreserve.start, req->unreserve.end,
 				   unreserved);
 			break;
@@ -404,10 +623,12 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 						    req->is_reserved.start,
 						    req->is_reserved.end);
 			resp->is_reserved.reserved = reserved;
-			alloc_info("<is reserved> [tid: %ld] "
-				   "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+			alloc_info("<is reserved> [tid: %ld] ahnd: %" PRIx64
+				   ", ctx: %u, vm: %u"
+				   ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
 				   ", reserved: %d\n",
-				   (long) req->tid, req->is_reserved.start,
+				   (long) req->tid, req->allocator_handle,
+				   al->ctx, al->vm, req->is_reserved.start,
 				   req->is_reserved.end, reserved);
 			break;
 
@@ -420,10 +641,12 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 			if (allocated) {
 				resp->reserve_if_not_allocated.allocated = allocated;
 				alloc_info("<reserve if not allocated> [tid: %ld] "
-					   "handle: %u, size: 0x%lx, "
-					   "start: 0x%" PRIx64 ", end: 0x%" PRIx64
+					   "ahnd: %" PRIx64 ", ctx: %u, vm: %u"
+					   ", handle: %u, size: 0x%lx"
+					   ", start: 0x%" PRIx64 ", end: 0x%" PRIx64
 					   ", allocated: %d, reserved: %d\n",
-					   (long) req->tid, req->reserve.handle,
+					   (long) req->tid, req->allocator_handle,
+					   al->ctx, al->vm, req->reserve.handle,
 					   (long) size, req->reserve.start,
 					   req->reserve.end, allocated, false);
 				break;
@@ -435,13 +658,15 @@ static int handle_request(struct alloc_req *req, struct alloc_resp *resp)
 						req->reserve.end);
 			resp->reserve_if_not_allocated.reserved = reserved;
 			alloc_info("<reserve if not allocated> [tid: %ld] "
-				   "handle: %u, start: 0x%" PRIx64 ", end: 0x%" PRIx64
+				   "ahnd: %" PRIx64 ", ctx: %u, vm: %u"
+				   ", handle: %u, start: 0x%" PRIx64 ", end: 0x%" PRIx64
 				   ", allocated: %d, reserved: %d\n",
-				   (long) req->tid, req->reserve.handle,
+				   (long) req->tid, req->allocator_handle,
+				   al->ctx, al->vm,
+				   req->reserve.handle,
 				   req->reserve.start, req->reserve.end,
 				   false, reserved);
 			break;
-
 		}
 
 		if (req->request_type > REQ_CLOSE)
@@ -608,6 +833,33 @@ void intel_allocator_multiprocess_stop(void)
 	}
 }
 
+static uint64_t __intel_allocator_open_full(int fd, uint32_t ctx,
+					    uint32_t vm,
+					    uint64_t start, uint64_t end,
+					    uint8_t allocator_type,
+					    enum allocator_strategy strategy)
+{
+	struct alloc_req req = { .request_type = REQ_OPEN,
+				 .open.fd = fd,
+				 .open.ctx = ctx,
+				 .open.vm = vm,
+				 .open.start = start,
+				 .open.end = end,
+				 .open.allocator_type = allocator_type,
+				 .open.allocator_strategy = strategy };
+	struct alloc_resp resp;
+
+	/* Get child_tid only once at open() */
+	if (child_tid == -1)
+		child_tid = gettid();
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.open.allocator_handle);
+	igt_assert(resp.response_type == RESP_OPEN);
+
+	return resp.open.allocator_handle;
+}
+
 /**
  * intel_allocator_open_full:
  * @fd: i915 descriptor
@@ -641,24 +893,18 @@ uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
 				   uint8_t allocator_type,
 				   enum allocator_strategy strategy)
 {
-	struct alloc_req req = { .request_type = REQ_OPEN,
-				 .open.fd = fd,
-				 .open.ctx = ctx,
-				 .open.start = start,
-				 .open.end = end,
-				 .open.allocator_type = allocator_type,
-				 .open.allocator_strategy = strategy };
-	struct alloc_resp resp;
-
-	/* Get child_tid only once at open() */
-	if (child_tid == -1)
-		child_tid = gettid();
-
-	igt_assert(handle_request(&req, &resp) == 0);
-	igt_assert(resp.open.allocator_handle);
-	igt_assert(resp.response_type == RESP_OPEN);
+	return __intel_allocator_open_full(fd, ctx, 0, start, end,
+					   allocator_type, strategy);
+}
 
-	return resp.open.allocator_handle;
+uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
+				      uint64_t start, uint64_t end,
+				      uint8_t allocator_type,
+				      enum allocator_strategy strategy)
+{
+	igt_assert(vm != 0);
+	return __intel_allocator_open_full(fd, 0, vm, start, end,
+					   allocator_type, strategy);
 }
 
 /**
@@ -683,6 +929,30 @@ uint64_t intel_allocator_open(int fd, uint32_t ctx, uint8_t allocator_type)
 					 ALLOC_STRATEGY_HIGH_TO_LOW);
 }
 
+uint64_t intel_allocator_open_vm(int fd, uint32_t vm, uint8_t allocator_type)
+{
+	return intel_allocator_open_vm_full(fd, vm, 0, 0, allocator_type,
+					    ALLOC_STRATEGY_HIGH_TO_LOW);
+}
+
+uint64_t intel_allocator_open_vm_as(uint64_t allocator_handle, uint32_t new_vm)
+{
+	struct alloc_req req = { .request_type = REQ_OPEN_AS,
+				 .allocator_handle = allocator_handle,
+				 .open_as.new_vm = new_vm };
+	struct alloc_resp resp;
+
+	/* Get child_tid only once at open() */
+	if (child_tid == -1)
+		child_tid = gettid();
+
+	igt_assert(handle_request(&req, &resp) == 0);
+	igt_assert(resp.open_as.allocator_handle);
+	igt_assert(resp.response_type == RESP_OPEN_AS);
+
+	return resp.open.allocator_handle;
+}
+
 /**
  * intel_allocator_close:
  * @allocator_handle: handle to the allocator that will be closed
@@ -973,9 +1243,18 @@ void intel_allocator_print(uint64_t allocator_handle)
 	}
 }
 
-static bool equal_allocators(const void *key1, const void *key2)
+static bool equal_handles(const void *key1, const void *key2)
+{
+	const struct handle_entry *h1 = key1, *h2 = key2;
+	alloc_debug("h1: %llx, h2: %llx\n",
+		   (long long) h1->handle, (long long) h2->handle);
+
+	return h1->handle == h2->handle;
+}
+
+static bool equal_ctx(const void *key1, const void *key2)
 {
-	const struct intel_allocator *a1 = key1, *a2 = key2;
+	const struct allocator *a1 = key1, *a2 = key2;
 
 	alloc_debug("a1: <fd: %d, ctx: %u>, a2 <fd: %d, ctx: %u>\n",
 		   a1->fd, a1->ctx, a2->fd, a2->ctx);
@@ -983,31 +1262,53 @@ static bool equal_allocators(const void *key1, const void *key2)
 	return a1->fd == a2->fd && a1->ctx == a2->ctx;
 }
 
+static bool equal_vm(const void *key1, const void *key2)
+{
+	const struct allocator *a1 = key1, *a2 = key2;
+
+	alloc_debug("a1: <fd: %d, vm: %u>, a2 <fd: %d, vm: %u>\n",
+		   a1->fd, a1->vm, a2->fd, a2->vm);
+
+	return a1->fd == a2->fd && a1->vm == a2->vm;
+}
+
 /*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
 
-static inline uint64_t hash_allocators(const void *val, unsigned int bits)
+static inline uint64_t hash_handles(const void *val, unsigned int bits)
 {
-	uint64_t hash = ((struct intel_allocator *) val)->fd;
+	uint64_t hash = ((struct handle_entry *) val)->handle;
 
 	hash = hash * GOLDEN_RATIO_PRIME_64;
 	return hash >> (64 - bits);
 }
 
-static void __free_allocators(void)
+static inline uint64_t hash_instance(const void *val, unsigned int bits)
+{
+	uint64_t hash = ((struct allocator *) val)->fd;
+
+	hash = hash * GOLDEN_RATIO_PRIME_64;
+	return hash >> (64 - bits);
+}
+
+static void __free_maps(struct igt_map *map, bool close_allocators)
 {
 	struct igt_map_entry *pos;
-	struct intel_allocator *ial;
+	struct igt_hlist_node *tmp;
+	const struct handle_entry *h;
 	int i;
 
-	if (allocators_map) {
-		igt_map_for_each(allocators_map, i, pos) {
-			ial = pos->value;
-			ial->destroy(ial);
+	if (!map)
+		return;
+
+	if (close_allocators)
+		igt_map_for_each_safe(map, i, tmp, pos) {
+			h = pos->key;
+			allocator_close(h->handle);
 		}
-	}
 
-	igt_map_free(allocators_map);
+	igt_map_free(map);
+	free(map);
 }
 
 /**
@@ -1024,15 +1325,23 @@ void intel_allocator_init(void)
 	allocator_pid = getpid();
 	alloc_info("Allocator pid: %ld\n", (long) allocator_pid);
 
-	if (allocators_map) {
-		__free_allocators();
-		free(allocators_map);
-	}
+	__free_maps(handles, true);
+	__free_maps(ctx_map, false);
+	__free_maps(vm_map, false);
+
+	handles = calloc(sizeof(*handles), 1);
+	igt_assert(handles);
+
+	ctx_map = calloc(sizeof(*ctx_map), 1);
+	igt_assert(ctx_map);
 
-	allocators_map = calloc(sizeof(*allocators_map), 1);
-	igt_assert(allocators_map);
+	vm_map = calloc(sizeof(*vm_map), 1);
+	igt_assert(vm_map);
 
-	__igt_map_init(allocators_map, equal_allocators, hash_allocators, 8);
+	atomic_init(&next_handle, 1);
+	__igt_map_init(handles, equal_handles, hash_handles, 8);
+	__igt_map_init(ctx_map, equal_ctx, hash_instance, 8);
+	__igt_map_init(vm_map, equal_vm, hash_instance, 8);
 
 	channel = intel_allocator_get_msgchannel(CHANNEL_SYSVIPC_MSGQUEUE);
 }
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index 1298ce4a0..e6d16c15b 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -78,7 +78,6 @@ enum allocator_strategy {
 
 struct intel_allocator {
 	int fd;
-	uint32_t ctx;
 	uint8_t type;
 	enum allocator_strategy strategy;
 	_Atomic(int32_t) refcount;
@@ -119,6 +118,13 @@ uint64_t intel_allocator_open_full(int fd, uint32_t ctx,
 				   uint64_t start, uint64_t end,
 				   uint8_t allocator_type,
 				   enum allocator_strategy strategy);
+uint64_t intel_allocator_open_vm(int fd, uint32_t vm, uint8_t allocator_type);
+uint64_t intel_allocator_open_vm_full(int fd, uint32_t vm,
+				      uint64_t start, uint64_t end,
+				      uint8_t allocator_type,
+				      enum allocator_strategy strategy);
+
+uint64_t intel_allocator_open_vm_as(uint64_t allocator_handle, uint32_t new_vm);
 bool intel_allocator_close(uint64_t allocator_handle);
 void intel_allocator_get_address_range(uint64_t allocator_handle,
 				       uint64_t *startp, uint64_t *endp);
diff --git a/lib/intel_allocator_msgchannel.h b/lib/intel_allocator_msgchannel.h
index ad5a9e901..ac6edfb9e 100644
--- a/lib/intel_allocator_msgchannel.h
+++ b/lib/intel_allocator_msgchannel.h
@@ -13,6 +13,7 @@
 enum reqtype {
 	REQ_STOP,
 	REQ_OPEN,
+	REQ_OPEN_AS,
 	REQ_CLOSE,
 	REQ_ADDRESS_RANGE,
 	REQ_ALLOC,
@@ -26,6 +27,7 @@ enum reqtype {
 
 enum resptype {
 	RESP_OPEN,
+	RESP_OPEN_AS,
 	RESP_CLOSE,
 	RESP_ADDRESS_RANGE,
 	RESP_ALLOC,
@@ -48,12 +50,17 @@ struct alloc_req {
 		struct {
 			int fd;
 			uint32_t ctx;
+			uint32_t vm;
 			uint64_t start;
 			uint64_t end;
 			uint8_t allocator_type;
 			uint8_t allocator_strategy;
 		} open;
 
+		struct {
+			uint32_t new_vm;
+		} open_as;
+
 		struct {
 			uint32_t handle;
 			uint64_t size;
@@ -91,7 +98,7 @@ struct alloc_resp {
 	union {
 		struct {
 			uint64_t allocator_handle;
-		} open;
+		} open, open_as;
 
 		struct {
 			bool is_empty;
diff --git a/lib/intel_allocator_random.c b/lib/intel_allocator_random.c
index 15b930af1..d8c80e237 100644
--- a/lib/intel_allocator_random.c
+++ b/lib/intel_allocator_random.c
@@ -10,7 +10,7 @@
 #include "igt_rand.h"
 #include "intel_allocator.h"
 
-struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx);
+struct intel_allocator *intel_allocator_random_create(int fd);
 
 struct intel_allocator_random {
 	uint64_t bias;
@@ -144,8 +144,8 @@ static void intel_allocator_random_print(struct intel_allocator *ial, bool full)
 
 	(void) full;
 
-	igt_info("<fd: %d, ctx: %u> allocated objects: %" PRIx64 "\n",
-		 ial->fd, ial->ctx, ialr->allocated_objects);
+	igt_info("<ial: %p, fd: %d> allocated objects: %" PRIx64 "\n",
+		 ial, ial->fd, ialr->allocated_objects);
 }
 
 static bool intel_allocator_random_is_empty(struct intel_allocator *ial)
@@ -155,7 +155,7 @@ static bool intel_allocator_random_is_empty(struct intel_allocator *ial)
 	return !ialr->allocated_objects;
 }
 
-struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx)
+struct intel_allocator *intel_allocator_random_create(int fd)
 {
 	struct intel_allocator *ial;
 	struct intel_allocator_random *ialr;
@@ -165,7 +165,6 @@ struct intel_allocator *intel_allocator_random_create(int fd, uint32_t ctx)
 	igt_assert(ial);
 
 	ial->fd = fd;
-	ial->ctx = ctx;
 	ial->get_address_range = intel_allocator_random_get_address_range;
 	ial->alloc = intel_allocator_random_alloc;
 	ial->free = intel_allocator_random_free;
diff --git a/lib/intel_allocator_simple.c b/lib/intel_allocator_simple.c
index 1163b42a6..cc207c8e9 100644
--- a/lib/intel_allocator_simple.c
+++ b/lib/intel_allocator_simple.c
@@ -18,10 +18,9 @@
 #define RESERVED 4096
 
 /* Avoid compilation warning */
-struct intel_allocator *intel_allocator_simple_create(int fd, uint32_t ctx);
+struct intel_allocator *intel_allocator_simple_create(int fd);
 struct intel_allocator *
-intel_allocator_simple_create_full(int fd, uint32_t ctx,
-				   uint64_t start, uint64_t end,
+intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
 				   enum allocator_strategy strategy);
 
 struct simple_vma_heap {
@@ -583,9 +582,9 @@ static bool intel_allocator_simple_is_empty(struct intel_allocator *ial)
 {
 	struct intel_allocator_simple *ials = ial->priv;
 
-	igt_debug("<fd: %d, ctx: %u> objects: %" PRId64
+	igt_debug("<ial: %p, fd: %d> objects: %" PRId64
 		  ", reserved_areas: %" PRId64 "\n",
-		  ial->fd, ial->ctx,
+		  ial, ial->fd,
 		  ials->allocated_objects, ials->reserved_areas);
 
 	return !ials->allocated_objects && !ials->reserved_areas;
@@ -607,8 +606,8 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
 	igt_assert(ials);
 	heap = &ials->heap;
 
-	igt_info("intel_allocator_simple <fd:%d ctx:%d> on "
-		 "[0x%"PRIx64" : 0x%"PRIx64"]:\n", ial->fd, ial->ctx,
+	igt_info("intel_allocator_simple <ial: %p, fd: %d> on "
+		 "[0x%"PRIx64" : 0x%"PRIx64"]:\n", ial, ial->fd,
 		 ials->start, ials->end);
 
 	if (full) {
@@ -673,20 +672,18 @@ static void intel_allocator_simple_print(struct intel_allocator *ial, bool full)
 }
 
 static struct intel_allocator *
-__intel_allocator_simple_create(int fd, uint32_t ctx,
-				uint64_t start, uint64_t end,
+__intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
 				enum allocator_strategy strategy)
 {
 	struct intel_allocator *ial;
 	struct intel_allocator_simple *ials;
 
-	igt_debug("Using simple allocator <fd: %d, ctx: %u>\n", fd, ctx);
+	igt_debug("Using simple allocator\n");
 
 	ial = calloc(1, sizeof(*ial));
 	igt_assert(ial);
 
 	ial->fd = fd;
-	ial->ctx = ctx;
 	ial->get_address_range = intel_allocator_simple_get_address_range;
 	ial->alloc = intel_allocator_simple_alloc;
 	ial->free = intel_allocator_simple_free;
@@ -719,7 +716,7 @@ __intel_allocator_simple_create(int fd, uint32_t ctx,
 }
 
 struct intel_allocator *
-intel_allocator_simple_create(int fd, uint32_t ctx)
+intel_allocator_simple_create(int fd)
 {
 	uint64_t gtt_size = gem_aperture_size(fd);
 
@@ -728,13 +725,12 @@ intel_allocator_simple_create(int fd, uint32_t ctx)
 	else
 		gtt_size -= RESERVED;
 
-	return __intel_allocator_simple_create(fd, ctx, 0, gtt_size,
+	return __intel_allocator_simple_create(fd, 0, gtt_size,
 					       ALLOC_STRATEGY_HIGH_TO_LOW);
 }
 
 struct intel_allocator *
-intel_allocator_simple_create_full(int fd, uint32_t ctx,
-				   uint64_t start, uint64_t end,
+intel_allocator_simple_create_full(int fd, uint64_t start, uint64_t end,
 				   enum allocator_strategy strategy)
 {
 	uint64_t gtt_size = gem_aperture_size(fd);
@@ -744,5 +740,5 @@ intel_allocator_simple_create_full(int fd, uint32_t ctx,
 		gtt_size /= 2;
 	igt_assert(end - start <= gtt_size);
 
-	return __intel_allocator_simple_create(fd, ctx, start, end, strategy);
+	return __intel_allocator_simple_create(fd, start, end, strategy);
 }
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index b76a8bb87..9fa959f35 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1290,13 +1290,15 @@ __intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs,
 		do_relocs = true;
 
 	/* if relocs are set we won't use an allocator */
-	if (do_relocs)
+	if (do_relocs) {
 		allocator_type = INTEL_ALLOCATOR_NONE;
-	else
+	} else {
 		ibb->allocator_handle = intel_allocator_open_full(i915, ctx,
 								  start, end,
 								  allocator_type,
 								  strategy);
+		//intel_allocator_ref(ibb->allocator_handle);
+	}
 	ibb->allocator_type = allocator_type;
 	ibb->allocator_strategy = strategy;
 	ibb->i915 = i915;
@@ -1305,6 +1307,7 @@ __intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs,
 	ibb->size = size;
 	ibb->alignment = 4096;
 	ibb->ctx = ctx;
+	ibb->vm_id = 0;
 	ibb->batch = calloc(1, size);
 	igt_assert(ibb->batch);
 	ibb->ptr = ibb->batch;
@@ -1498,13 +1501,20 @@ static void __intel_bb_destroy_cache(struct intel_bb *ibb)
 	ibb->root = NULL;
 }
 
+static void __intel_bb_detach_intel_bufs(struct intel_bb *ibb)
+{
+	struct intel_buf *entry, *tmp;
+
+	igt_list_for_each_entry_safe(entry, tmp, &ibb->intel_bufs, link)
+		intel_bb_detach_intel_buf(ibb, entry);
+}
+
 static void __intel_bb_remove_intel_bufs(struct intel_bb *ibb)
 {
 	struct intel_buf *entry, *tmp;
 
-	igt_list_for_each_entry_safe(entry, tmp, &ibb->intel_bufs, link) {
+	igt_list_for_each_entry_safe(entry, tmp, &ibb->intel_bufs, link)
 		intel_bb_remove_intel_buf(ibb, entry);
-	}
 }
 
 /**
@@ -1627,6 +1637,50 @@ int intel_bb_sync(struct intel_bb *ibb)
 	return ret;
 }
 
+uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
+			    uint32_t vm_id, bool close_previous)
+{
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_VM,
+	};
+	uint64_t prev_allocator = ibb->allocator_handle;
+	bool closed = false;
+
+	/* Cannot switch if someone keeps bb refcount */
+	igt_assert(ibb->refcount == 1);
+
+	/* Detach intel_bufs and remove bb handle */
+	__intel_bb_detach_intel_bufs(ibb);
+	intel_bb_remove_object(ibb, ibb->handle, ibb->batch_offset, ibb->size);
+
+	/* Cache + objects are not valid after change anymore */
+	__intel_bb_destroy_objects(ibb);
+	__intel_bb_destroy_cache(ibb);
+
+	/* Attach new allocator */
+	//intel_allocator_unref(prev_allocator);
+	ibb->allocator_handle = allocator;
+	//intel_allocator_ref(allocator);
+
+	/* setparam */
+	ibb->vm_id = vm_id;
+	arg.ctx_id = ibb->ctx;
+	arg.value = vm_id;
+	gem_context_set_param(ibb->i915, &arg);
+
+	/* Recreate bb */
+	intel_bb_reset(ibb, false);
+
+	if (close_previous) {
+		closed = intel_allocator_close(prev_allocator);
+
+		if (!closed)
+			igt_debug("Previous allocator still has references, cannot close\n");
+	}
+
+	return closed ? 0 : prev_allocator;
+}
+
 /*
  * intel_bb_print:
  * @ibb: pointer to intel_bb
@@ -1974,6 +2028,19 @@ intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *bu
 	return __intel_bb_add_intel_buf(ibb, buf, alignment, write);
 }
 
+void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
+{
+	igt_assert(ibb);
+	igt_assert(buf);
+	igt_assert(!buf->ibb || buf->ibb == ibb);
+
+	if (!igt_list_empty(&buf->link)) {
+		buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+		buf->ibb = NULL;
+		igt_list_del_init(&buf->link);
+	}
+}
+
 bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
 {
 	bool removed;
@@ -1982,11 +2049,13 @@ bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
 	igt_assert(buf);
 	igt_assert(!buf->ibb || buf->ibb == ibb);
 
+	if (igt_list_empty(&buf->link))
+		return false;
+
 	removed = intel_bb_remove_object(ibb, buf->handle,
 					 buf->addr.offset,
 					 intel_buf_bo_size(buf));
-
-	if (removed && !igt_list_empty(&buf->link)) {
+	if (removed) {
 		buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
 		buf->ibb = NULL;
 		igt_list_del_init(&buf->link);
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index f8a38967b..143a93846 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -465,6 +465,7 @@ struct intel_bb {
 	bool uses_full_ppgtt;
 
 	uint32_t ctx;
+	uint32_t vm_id;
 
 	/* Cache */
 	void *root;
@@ -521,29 +522,15 @@ static inline void intel_bb_unref(struct intel_bb *ibb)
 
 void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache);
 int intel_bb_sync(struct intel_bb *ibb);
+
+uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
+			    uint32_t vm_id, bool close_previous);
+
 void intel_bb_print(struct intel_bb *ibb);
 void intel_bb_dump(struct intel_bb *ibb, const char *filename);
 void intel_bb_set_debug(struct intel_bb *ibb, bool debug);
 void intel_bb_set_dump_base64(struct intel_bb *ibb, bool dump);
 
-/*
-static inline uint64_t
-intel_bb_set_default_object_alignment(struct intel_bb *ibb, uint64_t alignment)
-{
-	uint64_t old = ibb->alignment;
-
-	ibb->alignment = alignment;
-
-	return old;
-}
-
-static inline uint64_t
-intel_bb_get_default_object_alignment(struct intel_bb *ibb)
-{
-	return ibb->alignment;
-}
-*/
-
 static inline uint32_t intel_bb_offset(struct intel_bb *ibb)
 {
 	return (uint32_t) ((uint8_t *) ibb->ptr - (uint8_t *) ibb->batch);
@@ -601,6 +588,7 @@ intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf, bool write);
 struct drm_i915_gem_exec_object2 *
 intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *buf,
 				      uint64_t alignment, bool write);
+void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
 bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
 void intel_bb_intel_buf_list(struct intel_bb *ibb);
 struct drm_i915_gem_exec_object2 *
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
index 2de7a0baa..0a1aec09e 100644
--- a/tests/i915/api_intel_allocator.c
+++ b/tests/i915/api_intel_allocator.c
@@ -26,87 +26,82 @@ static inline uint32_t gem_handle_gen(void)
 
 static void alloc_simple(int fd)
 {
-	uint64_t ialh;
-	uint64_t offset0, offset1;
+	uint64_t ahnd;
+	uint64_t offset0, offset1, size = 0x1000, align = 0x1000;
 	bool is_allocated, freed;
 
-	ialh = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
 
-	offset0 = intel_allocator_alloc(ialh, 1, 0x1000, 0x1000);
-	offset1 = intel_allocator_alloc(ialh, 1, 0x1000, 0x1000);
+	offset0 = intel_allocator_alloc(ahnd, 1, size, align);
+	offset1 = intel_allocator_alloc(ahnd, 1, size, align);
 	igt_assert(offset0 == offset1);
 
-	is_allocated = intel_allocator_is_allocated(ialh, 1, 0x1000, offset0);
+	is_allocated = intel_allocator_is_allocated(ahnd, 1, size, offset0);
 	igt_assert(is_allocated);
 
-	freed = intel_allocator_free(ialh, 1);
+	freed = intel_allocator_free(ahnd, 1);
 	igt_assert(freed);
 
-	is_allocated = intel_allocator_is_allocated(ialh, 1, 0x1000, offset0);
+	is_allocated = intel_allocator_is_allocated(ahnd, 1, size, offset0);
 	igt_assert(!is_allocated);
 
-	freed = intel_allocator_free(ialh, 1);
+	freed = intel_allocator_free(ahnd, 1);
 	igt_assert(!freed);
 
-	intel_allocator_close(ialh);
+	igt_assert_eq(intel_allocator_close(ahnd), true);
 }
 
 static void reserve_simple(int fd)
 {
-	uint64_t ialh;
-	uint64_t start;
+	uint64_t ahnd, start, size = 0x1000;
 	bool reserved, unreserved;
 
-	ialh = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
-	intel_allocator_get_address_range(ialh, &start, NULL);
+	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	intel_allocator_get_address_range(ahnd, &start, NULL);
 
-	reserved = intel_allocator_reserve(ialh, 0, 0x1000, start);
+	reserved = intel_allocator_reserve(ahnd, 0, size, start);
 	igt_assert(reserved);
 
-	reserved = intel_allocator_is_reserved(ialh, 0x1000, start);
+	reserved = intel_allocator_is_reserved(ahnd, size, start);
 	igt_assert(reserved);
 
-	reserved = intel_allocator_reserve(ialh, 0, 0x1000, start);
+	reserved = intel_allocator_reserve(ahnd, 0, size, start);
 	igt_assert(!reserved);
 
-	unreserved = intel_allocator_unreserve(ialh, 0, 0x1000, start);
+	unreserved = intel_allocator_unreserve(ahnd, 0, size, start);
 	igt_assert(unreserved);
 
-	reserved = intel_allocator_is_reserved(ialh, 0x1000, start);
+	reserved = intel_allocator_is_reserved(ahnd, size, start);
 	igt_assert(!reserved);
 
-	intel_allocator_close(ialh);
+	igt_assert_eq(intel_allocator_close(ahnd), true);
 }
 
 static void reserve(int fd, uint8_t type)
 {
-	struct intel_allocator *ial;
 	struct test_obj obj;
+	uint64_t ahnd, offset = 0x40000, size = 0x1000;
 
-	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	ahnd = intel_allocator_open(fd, 0, type);
 
-	igt_assert(ial->reserve(ial, 0, 0x40000, 0x800000));
-	/* try reserve once again */
-	igt_assert_eq(ial->reserve(ial, 0, 0x40040, 0x700000), false);
+	igt_assert_eq(intel_allocator_reserve(ahnd, 0, size, offset), true);
+	/* an overlapping reserve won't succeed */
+	igt_assert_eq(intel_allocator_reserve(ahnd, 0, size, offset + size/2), false);
 
 	obj.handle = gem_handle_gen();
 	obj.size = OBJ_SIZE;
-	obj.offset = ial->alloc(ial, obj.handle, obj.size, 0);
+	obj.offset = intel_allocator_alloc(ahnd, obj.handle, obj.size, 0);
 
-	igt_assert_eq(ial->reserve(ial, 0, obj.offset,
-				obj.offset + obj.size), false);
-	ial->free(ial, obj.handle);
-	igt_assert_eq(ial->reserve(ial, 0, obj.offset,
-				obj.offset + obj.size), true);
+	igt_assert_eq(intel_allocator_reserve(ahnd, 0, obj.size, obj.offset), false);
+	intel_allocator_free(ahnd, obj.handle);
+	igt_assert_eq(intel_allocator_reserve(ahnd, 0, obj.size, obj.offset), true);
 
-	ial->unreserve(ial, 0, obj.offset, obj.offset + obj.size);
-	ial->unreserve(ial, 0, 0x40000, 0x800000);
-	igt_assert(ial->reserve(ial, 0, 0x40040, 0x700000));
-	ial->unreserve(ial, 0, 0x40040, 0x700000);
+	igt_assert_eq(intel_allocator_unreserve(ahnd, 0, obj.size, obj.offset), true);
+	igt_assert_eq(intel_allocator_unreserve(ahnd, 0, size, offset), true);
+	igt_assert_eq(intel_allocator_reserve(ahnd, 0, size, offset + size/2), true);
+	igt_assert_eq(intel_allocator_unreserve(ahnd, 0, size, offset + size/2), true);
 
-	igt_assert(ial->is_empty(ial));
-
-	intel_allocator_close(to_user_pointer(ial));
+	igt_assert_eq(intel_allocator_close(ahnd), true);
 }
 
 static bool overlaps(struct test_obj *buf1, struct test_obj *buf2)
@@ -122,18 +117,18 @@ static bool overlaps(struct test_obj *buf1, struct test_obj *buf2)
 static void basic_alloc(int fd, int cnt, uint8_t type)
 {
 	struct test_obj *obj;
-	struct intel_allocator *ial;
+	uint64_t ahnd;
 	int i, j;
 
-	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	ahnd = intel_allocator_open(fd, 0, type);
 	obj = malloc(sizeof(struct test_obj) * cnt);
 
 	for (i = 0; i < cnt; i++) {
 		igt_progress("allocating objects: ", i, cnt);
 		obj[i].handle = gem_handle_gen();
 		obj[i].size = OBJ_SIZE;
-		obj[i].offset = ial->alloc(ial, obj[i].handle,
-					   obj[i].size, 4096);
+		obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
+						      obj[i].size, 4096);
 		igt_assert_eq(obj[i].offset % 4096, 0);
 	}
 
@@ -152,60 +147,57 @@ static void basic_alloc(int fd, int cnt, uint8_t type)
 
 	for (i = 0; i < cnt; i++) {
 		igt_progress("freeing objects: ", i, cnt);
-		ial->free(ial, obj[i].handle);
+		intel_allocator_free(ahnd, obj[i].handle);
 	}
 
-	igt_assert(ial->is_empty(ial));
-
 	free(obj);
-	intel_allocator_close(to_user_pointer(ial));
+	igt_assert_eq(intel_allocator_close(ahnd), true);
 }
 
 static void reuse(int fd, uint8_t type)
 {
 	struct test_obj obj[128], tmp;
-	struct intel_allocator *ial;
-	uint64_t prev_offset;
+	uint64_t ahnd, prev_offset;
 	int i;
 
-	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	ahnd = intel_allocator_open(fd, 0, type);
 
 	for (i = 0; i < 128; i++) {
 		obj[i].handle = gem_handle_gen();
 		obj[i].size = OBJ_SIZE;
-		obj[i].offset = ial->alloc(ial, obj[i].handle,
-					   obj[i].size, 0x40);
+		obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
+						      obj[i].size, 0x40);
 	}
 
 	/* check simple reuse */
 	for (i = 0; i < 128; i++) {
 		prev_offset = obj[i].offset;
-		obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+		obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
+						      obj[i].size, 0);
 		igt_assert(prev_offset == obj[i].offset);
 	}
 	i--;
 
-	/* free bo prevously alloced */
-	ial->free(ial, obj[i].handle);
+	/* free previously allocated bo */
+	intel_allocator_free(ahnd, obj[i].handle);
 	/* alloc different buffer to fill freed hole */
 	tmp.handle = gem_handle_gen();
-	tmp.offset = ial->alloc(ial, tmp.handle, OBJ_SIZE, 0);
+	tmp.offset = intel_allocator_alloc(ahnd, tmp.handle, OBJ_SIZE, 0);
 	igt_assert(prev_offset == tmp.offset);
 
-	obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+	obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
+					      obj[i].size, 0);
 	igt_assert(prev_offset != obj[i].offset);
-	ial->free(ial, tmp.handle);
+	intel_allocator_free(ahnd, tmp.handle);
 
 	for (i = 0; i < 128; i++)
-		ial->free(ial, obj[i].handle);
+		intel_allocator_free(ahnd, obj[i].handle);
 
-	igt_assert(ial->is_empty(ial));
-
-	intel_allocator_close(to_user_pointer(ial));
+	igt_assert_eq(intel_allocator_close(ahnd), true);
 }
 
 struct ial_thread_args {
-	struct intel_allocator *ial;
+	uint64_t ahnd;
 	pthread_t thread;
 	uint32_t *handles;
 	uint64_t *offsets;
@@ -221,10 +213,8 @@ static void *alloc_bo_in_thread(void *arg)
 
 	for (i = a->idx; i < a->count; i += a->threads) {
 		a->handles[i] = gem_handle_gen();
-		pthread_mutex_lock(&a->ial->mutex);
-		a->offsets[i] = a->ial->alloc(a->ial, a->handles[i], OBJ_SIZE,
-					      1UL << ((random() % 20) + 1));
-		pthread_mutex_unlock(&a->ial->mutex);
+		a->offsets[i] = intel_allocator_alloc(a->ahnd, a->handles[i], OBJ_SIZE,
+						      1UL << ((random() % 20) + 1));
 	}
 
 	return NULL;
@@ -236,9 +226,7 @@ static void *free_bo_in_thread(void *arg)
 	int i;
 
 	for (i = (a->idx + 1) % a->threads; i < a->count; i += a->threads) {
-		pthread_mutex_lock(&a->ial->mutex);
-		a->ial->free(a->ial, a->handles[i]);
-		pthread_mutex_unlock(&a->ial->mutex);
+		intel_allocator_free(a->ahnd, a->handles[i]);
 	}
 
 	return NULL;
@@ -248,21 +236,20 @@ static void *free_bo_in_thread(void *arg)
 
 static void parallel_one(int fd, uint8_t type)
 {
-	struct intel_allocator *ial;
 	struct ial_thread_args a[THREADS];
 	uint32_t *handles;
-	uint64_t *offsets;
+	uint64_t ahnd, *offsets;
 	int count, i;
 
 	srandom(0xdeadbeef);
-	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	ahnd = intel_allocator_open(fd, 0, type);
 	count = 1UL << 12;
 
 	handles = malloc(sizeof(uint32_t) * count);
 	offsets = calloc(1, sizeof(uint64_t) * count);
 
 	for (i = 0; i < THREADS; i++) {
-		a[i].ial = ial;
+		a[i].ahnd = ahnd;
 		a[i].handles = handles;
 		a[i].offsets = offsets;
 		a[i].count = count;
@@ -281,7 +268,7 @@ static void parallel_one(int fd, uint8_t type)
 			break;
 
 		igt_assert_eq(offsets[i],
-			      a->ial->alloc(ial, handles[i], OBJ_SIZE, 0));
+			      intel_allocator_alloc(a->ahnd, handles[i], OBJ_SIZE, 0));
 	}
 
 	for (i = 0; i < THREADS; i++)
@@ -290,18 +277,10 @@ static void parallel_one(int fd, uint8_t type)
 	for (i = 0; i < THREADS; i++)
 		pthread_join(a[i].thread, NULL);
 
-	/* Check if all offsets where objects were are free */
-	for (i = 0; i < count; i++) {
-		if (type == INTEL_ALLOCATOR_RANDOM)
-			break;
-
-		igt_assert(ial->reserve(ial, 0, offsets[i], offsets[i] + 1));
-	}
-
 	free(handles);
 	free(offsets);
 
-	intel_allocator_close(to_user_pointer(ial));
+	igt_assert_eq(intel_allocator_close(ahnd), true);
 }
 
 #define SIMPLE_GROUP_ALLOCS 8
@@ -403,7 +382,7 @@ static void fork_simple_stress(int fd, bool two_level_inception)
 	igt_assert_f(are_empty, "Allocators were not emptied\n");
 }
 
-static void __reopen_allocs(int fd1, int fd2)
+static void __reopen_allocs(int fd1, int fd2, bool check)
 {
 	uint64_t ahnd0, ahnd1, ahnd2;
 
@@ -411,11 +390,18 @@ static void __reopen_allocs(int fd1, int fd2)
 	ahnd1 = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
 	ahnd2 = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
 	igt_assert(ahnd0 != ahnd1);
-	igt_assert(ahnd1 == ahnd2);
-
-	intel_allocator_close(ahnd0);
-	intel_allocator_close(ahnd1);
-	intel_allocator_close(ahnd2);
+	igt_assert(ahnd1 != ahnd2);
+
+	/* in fork mode we can have more references, so skip check */
+	if (!check) {
+		intel_allocator_close(ahnd0);
+		intel_allocator_close(ahnd1);
+		intel_allocator_close(ahnd2);
+	} else {
+		igt_assert_eq(intel_allocator_close(ahnd0), true);
+		igt_assert_eq(intel_allocator_close(ahnd1), false);
+		igt_assert_eq(intel_allocator_close(ahnd2), true);
+	}
 }
 
 static void reopen(int fd)
@@ -426,7 +412,7 @@ static void reopen(int fd)
 
 	fd2 = gem_reopen_driver(fd);
 
-	__reopen_allocs(fd, fd2);
+	__reopen_allocs(fd, fd2, true);
 
 	close(fd2);
 }
@@ -444,13 +430,16 @@ static void reopen_fork(int fd)
 
 	igt_fork(child, 1) {
 		igt_until_timeout(REOPEN_TIMEOUT)
-			__reopen_allocs(fd, fd2);
+			__reopen_allocs(fd, fd2, false);
 	}
 	igt_until_timeout(REOPEN_TIMEOUT)
-		__reopen_allocs(fd, fd2);
+		__reopen_allocs(fd, fd2, false);
 
 	igt_waitchildren();
 
+	/* Check references at the end */
+	__reopen_allocs(fd, fd2, true);
+
 	close(fd2);
 
 	intel_allocator_multiprocess_stop();
@@ -544,6 +533,40 @@ static void execbuf_with_allocator(int fd)
 	igt_assert(intel_allocator_close(ahnd) == true);
 }
 
+static void open_vm(int fd)
+{
+	uint64_t ahnd[4], offset[4], size = 0x1000;
+	int i, n = ARRAY_SIZE(ahnd);
+
+	ahnd[0] = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+	ahnd[1] = intel_allocator_open_vm(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+	ahnd[2] = intel_allocator_open_vm_as(ahnd[1], 2);
+	ahnd[3] = intel_allocator_open(fd, 3, INTEL_ALLOCATOR_SIMPLE);
+
+	offset[0] = intel_allocator_alloc(ahnd[0], 1, size, 0);
+	offset[1] = intel_allocator_alloc(ahnd[1], 2, size, 0);
+	igt_assert(offset[0] != offset[1]);
+
+	offset[2] = intel_allocator_alloc(ahnd[2], 3, size, 0);
+	igt_assert(offset[0] != offset[2] && offset[1] != offset[2]);
+
+	offset[3] = intel_allocator_alloc(ahnd[3], 1, size, 0);
+	igt_assert(offset[0] == offset[3]);
+
+	/*
+	 * As ahnd[0-2] lead to the same allocator, check that we can free
+	 * all handles using a selected ahnd.
+	 */
+	intel_allocator_free(ahnd[0], 1);
+	intel_allocator_free(ahnd[0], 2);
+	intel_allocator_free(ahnd[0], 3);
+	intel_allocator_free(ahnd[3], 1);
+
+	for (i = 0; i < n - 1; i++)
+		igt_assert_eq(intel_allocator_close(ahnd[i]), (i == n - 2));
+	igt_assert_eq(intel_allocator_close(ahnd[n-1]), true);
+}
+
 struct allocators {
 	const char *name;
 	uint8_t type;
@@ -570,9 +593,12 @@ igt_main
 	igt_subtest_f("reserve-simple")
 		reserve_simple(fd);
 
-	igt_subtest_f("print")
+	igt_subtest_f("print-random")
 		basic_alloc(fd, 1UL << 2, INTEL_ALLOCATOR_RANDOM);
 
+	igt_subtest_f("print-simple")
+		basic_alloc(fd, 1UL << 2, INTEL_ALLOCATOR_SIMPLE);
+
 	igt_subtest_f("reuse")
 		reuse(fd, INTEL_ALLOCATOR_SIMPLE);
 
@@ -627,6 +653,9 @@ igt_main
 	igt_subtest_f("execbuf-with-allocator")
 		execbuf_with_allocator(fd);
 
+	igt_subtest_f("open-vm")
+		open_vm(fd);
+
 	igt_fixture
 		close(fd);
 }
diff --git a/tests/i915/api_intel_bb.c b/tests/i915/api_intel_bb.c
index b62957b34..57a99b360 100644
--- a/tests/i915/api_intel_bb.c
+++ b/tests/i915/api_intel_bb.c
@@ -37,6 +37,7 @@
 #include <zlib.h>
 #include "intel_bufops.h"
 #include "sw_sync.h"
+#include "i915/gem_vm.h"
 
 #define PAGE_SIZE 4096
 
@@ -237,6 +238,93 @@ static void bb_with_allocator(struct buf_ops *bops)
 	intel_bb_destroy(ibb);
 }
 
+static void bb_with_vm(struct buf_ops *bops)
+{
+	int i915 = buf_ops_get_fd(bops);
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_VM,
+	};
+	struct intel_bb *ibb;
+	struct intel_buf *src, *dst, *gap;
+	uint32_t ctx = 0, vm_id1, vm_id2;
+	uint64_t prev_vm, vm;
+	uint64_t src1_addr, dst1_addr;
+	uint64_t src2_addr, dst2_addr;
+	uint64_t src3_addr, dst3_addr;
+	uint64_t src4_addr, dst4_addr;
+
+	igt_require(gem_uses_full_ppgtt(i915));
+
+	ibb = intel_bb_create_with_allocator(i915, ctx, PAGE_SIZE,
+					     INTEL_ALLOCATOR_SIMPLE);
+	if (debug_bb)
+		intel_bb_set_debug(ibb, true);
+
+	src = intel_buf_create(bops, 4096/32, 32, 8, 0, I915_TILING_NONE,
+			       I915_COMPRESSION_NONE);
+	dst = intel_buf_create(bops, 4096/32, 32, 8, 0, I915_TILING_NONE,
+			       I915_COMPRESSION_NONE);
+	gap = intel_buf_create(bops, 4096, 128, 8, 0, I915_TILING_NONE,
+			       I915_COMPRESSION_NONE);
+
+	/* vm for second blit */
+	vm_id1 = gem_vm_create(i915);
+
+	/* Get vm_id for default vm */
+	arg.ctx_id = ctx;
+	gem_context_get_param(i915, &arg);
+	vm_id2 = arg.value;
+
+	igt_debug("Vm_id1: %u\n", vm_id1);
+	igt_debug("Vm_id2: %u\n", vm_id2);
+
+	/* First blit, without calling setparam */
+	intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+	src1_addr = src->addr.offset;
+	dst1_addr = dst->addr.offset;
+	igt_debug("step1: src: 0x%llx, dst: 0x%llx\n",
+		  (long long) src1_addr, (long long) dst1_addr);
+
+	/* Open new allocator with vm_id */
+	vm = intel_allocator_open_vm(i915, vm_id1, INTEL_ALLOCATOR_SIMPLE);
+	prev_vm = intel_bb_assign_vm(ibb, vm, vm_id1, false);
+
+	intel_bb_add_intel_buf(ibb, gap, false);
+	intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+	src2_addr = src->addr.offset;
+	dst2_addr = dst->addr.offset;
+	igt_debug("step2: src: 0x%llx, dst: 0x%llx\n",
+		  (long long) src2_addr, (long long) dst2_addr);
+
+	/* Back with default vm */
+	intel_bb_assign_vm(ibb, prev_vm, vm_id2, false);
+	intel_bb_add_intel_buf(ibb, gap, false);
+	intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+	src3_addr = src->addr.offset;
+	dst3_addr = dst->addr.offset;
+	igt_debug("step3: src: 0x%llx, dst: 0x%llx\n",
+		  (long long) src3_addr, (long long) dst3_addr);
+
+	/* And exchange one more time */
+	intel_bb_assign_vm(ibb, vm, vm_id1, false);
+	intel_bb_copy_intel_buf(ibb, dst, src, 4096);
+	src4_addr = src->addr.offset;
+	dst4_addr = dst->addr.offset;
+	igt_debug("step4: src: 0x%llx, dst: 0x%llx\n",
+		  (long long) src4_addr, (long long) dst4_addr);
+
+	/* Close vm allocator after assigning prev_vm */
+	intel_bb_assign_vm(ibb, prev_vm, vm_id2, true);
+
+	/* Addresses should match for vm and prev_vm blits */
+	igt_assert_eq(src1_addr, src3_addr);
+	igt_assert_eq(dst1_addr, dst3_addr);
+	igt_assert_eq(src2_addr, src4_addr);
+	igt_assert_eq(dst2_addr, dst4_addr);
+
+	intel_bb_destroy(ibb);
+}
+
 /*
  * Make sure we lead to realloc in the intel_bb.
  */
@@ -1526,6 +1614,9 @@ igt_main_args("dpib", NULL, help_str, opt_handler, NULL)
 	igt_subtest("bb-with-allocator")
 		bb_with_allocator(bops);
 
+	igt_subtest("bb-with-vm")
+		bb_with_vm(bops);
+
 	igt_subtest("lot-of-buffers")
 		lot_of_buffers(bops);
 
-- 
2.26.0


