[PATCH i-g-t v6 10/31] lib/intel_batchbuffer: Integrate intel_bb with allocator

Zbigniew Kempczyński zbigniew.kempczynski at intel.com
Mon Oct 19 07:51:00 UTC 2020

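With this change intel-bb no longer hand-rolls random presumed offsets: in
no-reloc mode it opens an IGT allocator for the (fd, ctx) pair and acquires
object offsets from it, while relocation mode is selected by passing
INTEL_ALLOCATOR_NONE. A minimal usage sketch of the new entry points (the
i915 fd, ctx and intel_buf below are assumed to be set up elsewhere):

	struct intel_bb *ibb;
	struct drm_i915_gem_exec_object2 *obj;

	/* No-reloc mode backed by the SIMPLE allocator for this fd/ctx. */
	ibb = intel_bb_create_full(i915, ctx, 4096, INTEL_ALLOCATOR_SIMPLE);

	/* Offsets come from the allocator; request 64K alignment explicitly. */
	obj = intel_bb_add_intel_buf_with_alignment(ibb, buf, 0x10000, true);
	igt_assert(obj->offset == buf->addr.offset);

	/* ... emit commands and execute the batch ... */

	/* Removing the buffer returns its offset to the allocator. */
	igt_assert(intel_bb_remove_intel_buf(ibb, buf));
	intel_bb_destroy(ibb);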

v2: fix leak in cache tree (Dominik)

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek at intel.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
---
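For reference only, a short sketch of the canonical-address form the
allocator offsets are converted to before reaching execbuf: on 48-bit ppgtt
bit 47 is sign-extended into bits 48-63, matching the existing
gen8_canonical_addr() helper in intel_batchbuffer.c (to_canonical() below is
just an illustrative name):

	/* Sign-extend bit 47 so high addresses are in canonical form. */
	static inline uint64_t to_canonical(uint64_t address)
	{
		return (int64_t)(address << 16) >> 16;
	}

	/* e.g. 0x0000800000001000 becomes 0xffff800000001000 */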
 lib/intel_batchbuffer.c | 356 ++++++++++++++++++++++++++++++----------
 lib/intel_batchbuffer.h |  21 ++-
 2 files changed, 286 insertions(+), 91 deletions(-)

diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index fc73495c..a0b0eaa8 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -49,7 +49,6 @@
 #include "media_spin.h"
 #include "gpgpu_fill.h"
 #include "igt_aux.h"
-#include "igt_rand.h"
 #include "i830_reg.h"
 #include "huc_copy.h"
 #include <glib.h>
@@ -1222,7 +1221,7 @@ static uint64_t gen8_canonical_addr(uint64_t address)
 
 static inline uint64_t __intel_bb_get_offset(struct intel_bb *ibb,
 					     uint32_t handle,
-					     uint32_t size,
+					     uint64_t size,
 					     uint32_t alignment)
 {
 	uint64_t offset;
@@ -1230,33 +1229,55 @@ static inline uint64_t __intel_bb_get_offset(struct intel_bb *ibb,
 	if (ibb->enforce_relocs)
 		return 0;
 
-	/* randomize the address, we try to avoid relocations */
-	offset = hars_petruska_f54_1_random64(&ibb->prng);
-	offset += 256 << 10; /* Keep the low 256k clear, for negative deltas */
-	offset &= ibb->gtt_size - 1;
-	offset &= ~(ibb->alignment - 1);
+	offset = intel_allocator_alloc(ibb->allocator_handle,
+				       handle, size, alignment);
+
 	offset = gen8_canonical_addr(offset);
 
 	return offset;
 }
 
 /**
- * intel_bb_create:
+ * __intel_bb_create:
  * @i915: drm fd
+ * @ctx: context
  * @size: size of the batchbuffer
+ * @do_relocs: true to use relocations, false to use the allocator
+ * @allocator_type: allocator type, must be INTEL_ALLOCATOR_NONE for relocations
+ *
+ * intel-bb works in one of two modes - with relocations or with an
+ * allocator (currently RANDOM and SIMPLE are implemented). How each mode
+ * maintains addresses is described below.
+ *
+ * In both modes intel-bb keeps objects and their offsets in an internal
+ * cache and reuses them.
+ *
+ * 1. intel-bb with relocations
+ *
+ * Creating a new intel-bb implicitly adds its handle to the cache and sets
+ * its address to 0.
+ *
+ * 2. intel-bb with allocator
+ *
+ * Creating a new intel-bb opens an allocator for the @i915 fd and @ctx;
+ * offsets for the batch and for added objects are acquired from that
+ * allocator instead of being randomized.
  * Returns:
  *
  * Pointer the intel_bb, asserts on failure.
  */
 static struct intel_bb *
-__intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs)
+__intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs,
+		  uint8_t allocator_type)
 {
 	struct intel_bb *ibb = calloc(1, sizeof(*ibb));
 	uint64_t gtt_size;
 
 	igt_assert(ibb);
 
+	if (!do_relocs)
+		ibb->allocator_handle = intel_allocator_open(i915, ctx, allocator_type);
+	else
+		igt_assert(allocator_type == INTEL_ALLOCATOR_NONE);
+	ibb->allocator_type = allocator_type;
 	ibb->i915 = i915;
 	ibb->devid = intel_get_drm_devid(i915);
 	ibb->gen = intel_gen(ibb->devid);
@@ -1268,41 +1289,48 @@ __intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs)
 	ibb->batch = calloc(1, size);
 	igt_assert(ibb->batch);
 	ibb->ptr = ibb->batch;
-	ibb->prng = (uint32_t) to_user_pointer(ibb);
 	ibb->fence = -1;
 
 	gtt_size = gem_aperture_size(i915);
 	if (!gem_uses_full_ppgtt(i915))
 		gtt_size /= 2;
-	if ((gtt_size - 1) >> 32) {
+
+	if ((gtt_size - 1) >> 32)
 		ibb->supports_48b_address = true;
 
-		/*
-		 * Until we develop IGT address allocator we workaround
-		 * playing with canonical addresses with 47-bit set to 1
-		 * just by limiting gtt size to 46-bit when gtt is 47 or 48
-		 * bit size. Current interface doesn't pass bo size, so
-		 * limiting to 46 bit make us sure we won't enter to
-		 * addresses with 47-bit set (we use 32-bit size now so
-		 * still we fit 47-bit address space).
-		 */
-		if (gtt_size & (3ull << 47))
-			gtt_size = (1ull << 46);
-	}
 	ibb->gtt_size = gtt_size;
+	ibb->uses_full_ppgtt = gem_uses_full_ppgtt(i915);
 
-	ibb->batch_offset = __intel_bb_get_offset(ibb,
-						  ibb->handle,
-						  ibb->size,
-						  ibb->alignment);
+	ibb->batch_offset = INTEL_BUF_INVALID_ADDRESS;
 	intel_bb_add_object(ibb, ibb->handle, ibb->size,
-			    ibb->batch_offset, false);
+			    ibb->batch_offset, ibb->alignment, false);
 
 	ibb->refcount = 1;
 
 	return ibb;
 }
 
+/**
+ * intel_bb_create_full:
+ * @i915: drm fd
+ * @ctx: context
+ * @size: size of the batchbuffer
+ * @allocator_type: allocator type, SIMPLE, RANDOM, ...
+ *
+ * Creates a bb with the context passed in @ctx, size in @size and allocator
+ * type in @allocator_type. Relocations are disabled because the IGT
+ * allocator is used instead.
+ *
+ * Returns:
+ *
+ * Pointer to the intel_bb, asserts on failure.
+ */
+struct intel_bb *intel_bb_create_full(int i915, uint32_t ctx, uint32_t size,
+				      uint8_t allocator_type)
+{
+	return __intel_bb_create(i915, ctx, size, false, allocator_type);
+}
+
 /**
  * intel_bb_create:
  * @i915: drm fd
@@ -1316,7 +1344,7 @@ __intel_bb_create(int i915, uint32_t ctx, uint32_t size, bool do_relocs)
  */
 struct intel_bb *intel_bb_create(int i915, uint32_t size)
 {
-	return __intel_bb_create(i915, 0, size, false);
+	return __intel_bb_create(i915, 0, size, false, INTEL_ALLOCATOR_SIMPLE);
 }
 
 /**
@@ -1334,7 +1362,7 @@ struct intel_bb *intel_bb_create(int i915, uint32_t size)
 struct intel_bb *
 intel_bb_create_with_context(int i915, uint32_t ctx, uint32_t size)
 {
-	return __intel_bb_create(i915, ctx, size, false);
+	return __intel_bb_create(i915, ctx, size, false, INTEL_ALLOCATOR_SIMPLE);
 }
 
 /**
@@ -1351,7 +1379,7 @@ intel_bb_create_with_context(int i915, uint32_t ctx, uint32_t size)
  */
 struct intel_bb *intel_bb_create_with_relocs(int i915, uint32_t size)
 {
-	return __intel_bb_create(i915, 0, size, true);
+	return __intel_bb_create(i915, 0, size, true, INTEL_ALLOCATOR_NONE);
 }
 
 /**
@@ -1370,7 +1398,7 @@ struct intel_bb *intel_bb_create_with_relocs(int i915, uint32_t size)
 struct intel_bb *
 intel_bb_create_with_relocs_and_context(int i915, uint32_t ctx, uint32_t size)
 {
-	return __intel_bb_create(i915, ctx, size, true);
+	return __intel_bb_create(i915, ctx, size, true, INTEL_ALLOCATOR_NONE);
 }
 
 static void __intel_bb_destroy_relocations(struct intel_bb *ibb)
@@ -1424,6 +1452,10 @@ void intel_bb_destroy(struct intel_bb *ibb)
 	__intel_bb_destroy_objects(ibb);
 	__intel_bb_destroy_cache(ibb);
 
+	if (ibb->allocator_type != INTEL_ALLOCATOR_NONE) {
+		intel_allocator_free(ibb->allocator_handle, ibb->handle);
+		intel_allocator_close(ibb->allocator_handle);
+	}
 	gem_close(ibb->i915, ibb->handle);
 
 	if (ibb->fence >= 0)
@@ -1463,28 +1495,30 @@ void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache)
 	__intel_bb_destroy_objects(ibb);
 	__reallocate_objects(ibb);
 
-	if (purge_objects_cache) {
+	if (purge_objects_cache)
 		__intel_bb_destroy_cache(ibb);
+
+	/*
+	 * When we use an allocator we're in no-reloc mode, so we have to free
+	 * and reacquire the offset (ibb->handle can change in a multiprocess
+	 * environment).
+	 */
+	if (ibb->allocator_type != INTEL_ALLOCATOR_NONE)
+		intel_allocator_free(ibb->allocator_handle, ibb->handle);
+
+	gem_close(ibb->i915, ibb->handle);
+	ibb->handle = gem_create(ibb->i915, ibb->size);
+
+	/* Only SIMPLE reacquires the bb address; reloc mode and RANDOM keep it */
+	if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE)
 		ibb->batch_offset = __intel_bb_get_offset(ibb,
 							  ibb->handle,
 							  ibb->size,
 							  ibb->alignment);
-	} else {
-		struct drm_i915_gem_exec_object2 *object;
-
-		object = intel_bb_find_object(ibb, ibb->handle);
-		ibb->batch_offset = object ? object->offset :
-					     __intel_bb_get_offset(ibb,
-								   ibb->handle,
-								   ibb->size,
-								   ibb->alignment);
-	}
-
-	gem_close(ibb->i915, ibb->handle);
-	ibb->handle = gem_create(ibb->i915, ibb->size);
 
 	intel_bb_add_object(ibb, ibb->handle, ibb->size,
-			    ibb->batch_offset, false);
+			    gen8_canonical_addr(ibb->batch_offset),
+			    ibb->alignment, false);
 	ibb->ptr = ibb->batch;
 	memset(ibb->batch, 0, ibb->size);
 }
@@ -1523,8 +1557,8 @@ void intel_bb_print(struct intel_bb *ibb)
 		 ibb->i915, ibb->gen, ibb->devid, ibb->debug);
 	igt_info("handle: %u, size: %u, batch: %p, ptr: %p\n",
 		 ibb->handle, ibb->size, ibb->batch, ibb->ptr);
-	igt_info("prng: %u, gtt_size: %" PRIu64 ", supports 48bit: %d\n",
-		 ibb->prng, ibb->gtt_size, ibb->supports_48b_address);
+	igt_info("gtt_size: %" PRIu64 ", supports 48bit: %d\n",
+		 ibb->gtt_size, ibb->supports_48b_address);
 	igt_info("ctx: %u\n", ibb->ctx);
 	igt_info("root: %p\n", ibb->root);
 	igt_info("objects: %p, num_objects: %u, allocated obj: %u\n",
@@ -1598,7 +1632,7 @@ __add_to_cache(struct intel_bb *ibb, uint32_t handle)
 	if (*found == object) {
 		memset(object, 0, sizeof(*object));
 		object->handle = handle;
-		object->alignment = ibb->alignment;
+		object->offset = INTEL_BUF_INVALID_ADDRESS;
 	} else {
 		free(object);
 		object = *found;
@@ -1607,6 +1641,23 @@ __add_to_cache(struct intel_bb *ibb, uint32_t handle)
 	return object;
 }
 
+static bool __remove_from_cache(struct intel_bb *ibb, uint32_t handle)
+{
+	struct drm_i915_gem_exec_object2 **found, *object;
+
+	object = intel_bb_find_object(ibb, handle);
+	if (!object)
+		return false;
+
+	found = tdelete((void *) object, &ibb->root, __compare_objects);
+	if (!found)
+		return false;
+
+	free(object);
+
+	return true;
+}
+
 static int __compare_handles(const void *p1, const void *p2)
 {
 	return (int) (*(int32_t *) p1 - *(int32_t *) p2);
@@ -1630,12 +1681,50 @@ static void __add_to_objects(struct intel_bb *ibb,
 	}
 }
 
+static void __remove_from_objects(struct intel_bb *ibb,
+				  struct drm_i915_gem_exec_object2 *object)
+{
+	uint32_t i, **handle, *to_free;
+	bool found = false;
+
+	for (i = 0; i < ibb->num_objects; i++) {
+		if (ibb->objects[i] == object) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		igt_warn("Trying to remove unknown object, handle: %u\n",
+			 object->handle);
+		return;
+	}
+
+	ibb->num_objects--;
+	if (i < ibb->num_objects)
+		memmove(&ibb->objects[i], &ibb->objects[i + 1],
+			sizeof(object) * (ibb->num_objects - i));
+
+	handle = tfind((void *) &object->handle,
+		       &ibb->current, __compare_handles);
+	if (!handle) {
+		igt_warn("Object %u doesn't exist in the tree, can't remove\n",
+			 object->handle);
+		return;
+	}
+
+	to_free = *handle;
+	tdelete((void *) &object->handle, &ibb->current, __compare_handles);
+	free(to_free);
+}
+
 /**
  * intel_bb_add_object:
  * @ibb: pointer to intel_bb
  * @handle: which handle to add to objects array
  * @size: object size
  * @offset: presumed offset of the object when no relocation is enforced
+ * @alignment: alignment of the object; if 0, the page size (4096) is used
  * @write: does a handle is a render target
  *
  * Function adds or updates execobj slot in bb objects array and
@@ -1643,23 +1732,68 @@ static void __add_to_objects(struct intel_bb *ibb,
  * be marked with EXEC_OBJECT_WRITE flag.
  */
 struct drm_i915_gem_exec_object2 *
-intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint32_t size,
-		    uint64_t offset, bool write)
+intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint64_t size,
+		    uint64_t offset, uint64_t alignment, bool write)
 {
 	struct drm_i915_gem_exec_object2 *object;
 
+	igt_assert(INVALID_ADDR(offset) || alignment == 0
+		   || ALIGN(offset, alignment) == offset);
+
 	object = __add_to_cache(ibb, handle);
+	object->alignment = alignment ?: 4096;
 	__add_to_objects(ibb, object);
 
-	/* Limit current offset to gtt size */
+	igt_debug("Passed offset: %lx, handle: %u, alignment: %lx\n",
+		  (long) offset, handle, (long) alignment);
+	/*
+	 * If object->offset == INVALID_ADDRESS the object was freshly added
+	 * to the cache. In that case we have two choices:
+	 * a) get a new offset (the passed offset was invalid)
+	 * b) use the offset passed in the call (valid)
+	 */
+	if (INVALID_ADDR(object->offset)) {
+		if (INVALID_ADDR(offset)) {
+			offset = __intel_bb_get_offset(ibb, handle, size,
+						       object->alignment);
+		} else {
+			offset = gen8_canonical_addr(offset & (ibb->gtt_size - 1));
+
+			/*
+			 * For the simple allocator check entry consistency
+			 * - reserve the offset if it is not already allocated.
+			 */
+			if (ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE) {
+				bool allocated, reserved;
+
+				reserved = intel_allocator_reserve_if_not_allocated(ibb->allocator_handle,
+										    handle, size, offset,
+										    &allocated);
+				igt_assert_f(allocated || reserved,
+					     "Can't get offset, allocated: %d, reserved: %d\n",
+					     allocated, reserved);
+			}
+		}
+	} else {
+		igt_assert_f(object->offset == gen8_canonical_addr(offset),
+			     "offsets do not match: %" PRIx64 " <> %" PRIx64 "\n",
+			     (uint64_t) object->offset, gen8_canonical_addr(offset));
+	}
+
+	igt_debug("Assigned offset: %lx, handle: %u, alignment: %lx\n",
+		  (long) offset, handle, (long) alignment);
 	object->offset = offset;
-	if (offset != INTEL_BUF_INVALID_ADDRESS)
+
+	/* Limit current offset to gtt size */
+	if (offset != INTEL_BUF_INVALID_ADDRESS) {
 		object->offset = gen8_canonical_addr(offset & (ibb->gtt_size - 1));
 
-	if (object->offset == INTEL_BUF_INVALID_ADDRESS)
+	} else {
 		object->offset = __intel_bb_get_offset(ibb,
 						       handle, size,
 						       object->alignment);
+	}
 
 	if (write)
 		object->flags |= EXEC_OBJECT_WRITE;
@@ -1667,40 +1801,95 @@ intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint32_t size,
 	if (ibb->supports_48b_address)
 		object->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 
+	if (ibb->uses_full_ppgtt && ibb->allocator_type == INTEL_ALLOCATOR_SIMPLE)
+		object->flags |= EXEC_OBJECT_PINNED;
+
 	return object;
 }
 
-struct drm_i915_gem_exec_object2 *
-intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf, bool write)
+bool intel_bb_remove_object(struct intel_bb *ibb, uint32_t handle,
+			    uint64_t offset, uint64_t size)
 {
-	struct drm_i915_gem_exec_object2 *obj;
+	struct drm_i915_gem_exec_object2 *object;
+	bool is_reserved;
 
-	obj = intel_bb_add_object(ibb, buf->handle,
-				  intel_buf_bo_size(buf),
-				  buf->addr.offset, write);
+	object = intel_bb_find_object(ibb, handle);
+	if (!object)
+		return false;
 
-	/* For compressed surfaces ensure address is aligned to 64KB */
-	if (ibb->gen >= 12 && buf->compression) {
-		obj->offset &= ~(0x10000 - 1);
-		obj->alignment = 0x10000;
+	if (ibb->allocator_type != INTEL_ALLOCATOR_NONE) {
+		intel_allocator_free(ibb->allocator_handle, handle);
+		is_reserved = intel_allocator_is_reserved(ibb->allocator_handle,
+							  size, offset);
+		if (is_reserved)
+			intel_allocator_unreserve(ibb->allocator_handle, handle,
+						  size, offset);
 	}
 
-	/* For gen3 ensure tiled buffers are aligned to power of two size */
-	if (ibb->gen == 3 && buf->tiling) {
-		uint64_t alignment = 1024 * 1024;
+	__remove_from_objects(ibb, object);
+	__remove_from_cache(ibb, handle);
 
-		while (alignment < buf->surface[0].size)
-			alignment <<= 1;
-		obj->offset &= ~(alignment - 1);
-		obj->alignment = alignment;
+	return true;
+}
+
+static struct drm_i915_gem_exec_object2 *
+__intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf,
+			 uint64_t alignment, bool write)
+{
+	struct drm_i915_gem_exec_object2 *obj;
+
+	igt_assert(ALIGN(alignment, 4096) == alignment);
+
+	if (!alignment) {
+		alignment = 0x1000;
+
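+		/* For compressed surfaces ensure address is aligned to 64KB */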
+		if (ibb->gen >= 12 && buf->compression)
+			alignment = 0x10000;
+
+		/* For gen3 ensure tiled buffers are aligned to power of two size */
+		if (ibb->gen == 3 && buf->tiling) {
+			alignment = 1024 * 1024;
+
+			while (alignment < buf->surface[0].size)
+				alignment <<= 1;
+		}
 	}
 
-	/* Update address in intel_buf buffer */
+	obj = intel_bb_add_object(ibb, buf->handle, intel_buf_bo_size(buf),
+				  buf->addr.offset, alignment, write);
 	buf->addr.offset = obj->offset;
 
+	if (!ibb->enforce_relocs)
+		obj->alignment = alignment;
+
 	return obj;
 }
 
+struct drm_i915_gem_exec_object2 *
+intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf, bool write)
+{
+	return __intel_bb_add_intel_buf(ibb, buf, 0, write);
+}
+
+struct drm_i915_gem_exec_object2 *
+intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *buf,
+				      uint64_t alignment, bool write)
+{
+	return __intel_bb_add_intel_buf(ibb, buf, alignment, write);
+}
+
+bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
+{
+	bool removed = intel_bb_remove_object(ibb, buf->handle,
+					      buf->addr.offset,
+					      intel_buf_bo_size(buf));
+
+	if (removed)
+		buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+
+	return removed;
+}
+
 struct drm_i915_gem_exec_object2 *
 intel_bb_find_object(struct intel_bb *ibb, uint32_t handle)
 {
@@ -1757,14 +1946,9 @@ intel_bb_object_clear_flag(struct intel_bb *ibb, uint32_t handle, uint64_t flag)
  * @write_domain: gem domain bit for the relocation
  * @delta: delta value to add to @buffer's gpu address
  * @offset: offset within bb to be patched
- * @presumed_offset: address of the object in address space. If -1 is passed
- * then final offset of the object will be randomized (for no-reloc bb) or
- * 0 (for reloc bb, in that case reloc.presumed_offset will be -1). In
- * case address is known it should passed in @presumed_offset (for no-reloc).
  *
  * Function allocates additional relocation slot in reloc array for a handle.
- * It also implicitly adds handle in the objects array if object doesn't
- * exists but doesn't mark it as a render target.
+ * The object must have been added to the bb beforehand.
  */
 static uint64_t intel_bb_add_reloc(struct intel_bb *ibb,
 				   uint32_t to_handle,
@@ -1779,13 +1963,8 @@ static uint64_t intel_bb_add_reloc(struct intel_bb *ibb,
 	struct drm_i915_gem_exec_object2 *object, *to_object;
 	uint32_t i;
 
-	if (ibb->enforce_relocs) {
-		object = intel_bb_add_object(ibb, handle, 0,
-					     presumed_offset, false);
-	} else {
-		object = intel_bb_find_object(ibb, handle);
-		igt_assert(object);
-	}
+	object = intel_bb_find_object(ibb, handle);
+	igt_assert(object);
 
 	/* For ibb we have relocs allocated in chunks */
 	if (to_handle == ibb->handle) {
@@ -2086,6 +2265,9 @@ static void update_offsets(struct intel_bb *ibb,
 		igt_assert(object);
 
 		object->offset = objects[i].offset;
+
+		if (i == 0)
+			ibb->batch_offset = object->offset;
 	}
 }
 
@@ -2210,7 +2392,7 @@ uint64_t intel_bb_get_object_offset(struct intel_bb *ibb, uint32_t handle)
 	if (address == INTEL_BUF_INVALID_ADDRESS)
 		return address;
 
-	return address & (ibb->gtt_size - 1);
+	return gen8_canonical_addr(address & (ibb->gtt_size - 1));
 }
 
 /**
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index ab1b0c28..b4cbe673 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -8,6 +8,7 @@
 #include "igt_core.h"
 #include "intel_reg.h"
 #include "drmtest.h"
+#include "intel_allocator.h"
 
 #define BATCH_SZ 4096
 #define BATCH_RESERVED 16
@@ -435,6 +436,9 @@ igt_media_spinfunc_t igt_get_media_spinfunc(int devid);
  * Batchbuffer without libdrm dependency
  */
 struct intel_bb {
+	uint64_t allocator_handle;
+	uint8_t allocator_type;
+
 	int i915;
 	unsigned int gen;
 	bool debug;
@@ -448,9 +452,9 @@ struct intel_bb {
 	uint64_t alignment;
 	int fence;
 
-	uint32_t prng;
 	uint64_t gtt_size;
 	bool supports_48b_address;
+	bool uses_full_ppgtt;
 
 	uint32_t ctx;
 
@@ -478,6 +482,8 @@ struct intel_bb {
 	int32_t refcount;
 };
 
+struct intel_bb *intel_bb_create_full(int i915, uint32_t ctx, uint32_t size,
+				      uint8_t allocator_type);
 struct intel_bb *intel_bb_create(int i915, uint32_t size);
 struct intel_bb *
 intel_bb_create_with_context(int i915, uint32_t ctx, uint32_t size);
@@ -504,6 +510,7 @@ void intel_bb_dump(struct intel_bb *ibb, const char *filename);
 void intel_bb_set_debug(struct intel_bb *ibb, bool debug);
 void intel_bb_set_dump_base64(struct intel_bb *ibb, bool dump);
 
+/*
 static inline uint64_t
 intel_bb_set_default_object_alignment(struct intel_bb *ibb, uint64_t alignment)
 {
@@ -519,6 +526,7 @@ intel_bb_get_default_object_alignment(struct intel_bb *ibb)
 {
 	return ibb->alignment;
 }
+*/
 
 static inline uint32_t intel_bb_offset(struct intel_bb *ibb)
 {
@@ -568,11 +576,16 @@ static inline void intel_bb_out(struct intel_bb *ibb, uint32_t dword)
 }
 
 struct drm_i915_gem_exec_object2 *
-intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint32_t size,
-		    uint64_t offset, bool write);
+intel_bb_add_object(struct intel_bb *ibb, uint32_t handle, uint64_t size,
+		    uint64_t offset, uint64_t alignment, bool write);
+bool intel_bb_remove_object(struct intel_bb *ibb, uint32_t handle,
+			    uint64_t offset, uint64_t size);
 struct drm_i915_gem_exec_object2 *
 intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf, bool write);
-
+struct drm_i915_gem_exec_object2 *
+intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *buf,
+				      uint64_t alignment, bool write);
+bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
 struct drm_i915_gem_exec_object2 *
 intel_bb_find_object(struct intel_bb *ibb, uint32_t handle);
 
-- 
2.26.0