[PATCH 3/5] noalloc

Matthew Auld matthew.auld at intel.com
Thu Nov 28 16:34:41 UTC 2019


Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
 drivers/gpu/drm/i915/i915_buddy.c           | 409 +++++++++-----------
 drivers/gpu/drm/i915/i915_buddy.h           |  57 +--
 drivers/gpu/drm/i915/i915_globals.c         |   1 -
 drivers/gpu/drm/i915/selftests/i915_buddy.c |  38 +-
 4 files changed, 236 insertions(+), 269 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index e9d4200ce3bc..7780b36227ca 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -4,64 +4,21 @@
  */
 
 #include <linux/kmemleak.h>
+#include <linux/list_sort.h>
+#include <linux/mm.h>
 #include <linux/slab.h>
 
 #include "i915_buddy.h"
 
 #include "i915_gem.h"
-#include "i915_globals.h"
 #include "i915_utils.h"
 
-static struct i915_global_block {
-	struct i915_global base;
-	struct kmem_cache *slab_blocks;
-} global;
-
-static void i915_global_buddy_shrink(void)
-{
-	kmem_cache_shrink(global.slab_blocks);
-}
-
-static void i915_global_buddy_exit(void)
-{
-	kmem_cache_destroy(global.slab_blocks);
-}
-
-static struct i915_global_block global = { {
-	.shrink = i915_global_buddy_shrink,
-	.exit = i915_global_buddy_exit,
-} };
-
-int __init i915_global_buddy_init(void)
-{
-	global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
-	if (!global.slab_blocks)
-		return -ENOMEM;
-
-	i915_global_register(&global.base);
-	return 0;
-}
-
-static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
-						 unsigned int order,
-						 u64 offset)
+static void mark_poisoned(struct i915_buddy_block *block)
 {
-	struct i915_buddy_block *block;
-
-	block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
-	if (!block)
-		return NULL;
-
-	block->header = offset;
-	block->header |= order;
-	block->parent = parent;
-
-	return block;
-}
+	block->header &= ~I915_BUDDY_HEADER_STATE;
+	block->header |= I915_BUDDY_POISONED;
 
-static void i915_block_free(struct i915_buddy_block *block)
-{
-	kmem_cache_free(global.slab_blocks, block);
+	block->header &= ~I915_BUDDY_HEADER_ORDER;
 }
 
 static void mark_allocated(struct i915_buddy_block *block)
@@ -82,18 +39,14 @@ static void mark_free(struct i915_buddy_mm *mm,
 		 &mm->free_list[i915_buddy_block_order(block)]);
 }
 
-static void mark_split(struct i915_buddy_block *block)
-{
-	block->header &= ~I915_BUDDY_HEADER_STATE;
-	block->header |= I915_BUDDY_SPLIT;
-
-	list_del(&block->link);
-}
-
 int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
 {
-	unsigned int i;
-	u64 offset;
+	struct i915_buddy_block *pages, **roots;
+	struct list_head *free_list;
+	unsigned int max_order, n_roots;
+	unsigned long i;
+	u64 root_offset;
+	int err, chunk_shift;
 
 	if (size < chunk_size)
 		return -EINVAL;
@@ -106,146 +59,168 @@ int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
 
 	size = round_down(size, chunk_size);
 
-	mm->size = size;
-	mm->chunk_size = chunk_size;
-	mm->max_order = ilog2(size) - ilog2(chunk_size);
-
-	GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
+	chunk_shift = ilog2(chunk_size);
+	max_order = ilog2(size) - chunk_shift;
+	GEM_BUG_ON(max_order > I915_BUDDY_MAX_ORDER);
 
-	mm->free_list = kmalloc_array(mm->max_order + 1,
-				      sizeof(struct list_head),
-				      GFP_KERNEL);
-	if (!mm->free_list)
+	pages = kvmalloc_array(size >> chunk_shift,
+			       sizeof(struct i915_buddy_block),
+			       GFP_KERNEL);
+	if (!pages)
 		return -ENOMEM;
 
-	for (i = 0; i <= mm->max_order; ++i)
-		INIT_LIST_HEAD(&mm->free_list[i]);
-
-	mm->n_roots = hweight64(size);
-
-	mm->roots = kmalloc_array(mm->n_roots,
-				  sizeof(struct i915_buddy_block *),
+	free_list = kmalloc_array(max_order + 1,
+				  sizeof(struct list_head),
 				  GFP_KERNEL);
-	if (!mm->roots)
-		goto out_free_list;
-
-	offset = 0;
-	i = 0;
-
-	/*
-	 * Split into power-of-two blocks, in case we are given a size that is
-	 * not itself a power-of-two.
-	 */
-	do {
-		struct i915_buddy_block *root;
-		unsigned int order;
-		u64 root_size;
-
-		root_size = rounddown_pow_of_two(size);
-		order = ilog2(root_size) - ilog2(chunk_size);
-
-		root = i915_block_alloc(NULL, order, offset);
-		if (!root)
-			goto out_free_roots;
+	if (!free_list) {
+		err = -ENOMEM;
+		goto err_free_pages;
+	}
 
-		mark_free(mm, root);
+	for (i = 0; i <= max_order; ++i)
+		INIT_LIST_HEAD(&free_list[i]);
 
-		GEM_BUG_ON(i > mm->max_order);
-		GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
+	n_roots = hweight64(size);
+	roots = kmalloc_array(n_roots,
+			      sizeof(struct i915_buddy_block *),
+			      GFP_KERNEL);
+	if (!roots) {
+		err = -ENOMEM;
+		goto err_free_list;
+	}
 
-		mm->roots[i] = root;
+	mm->free_list = free_list;
+	mm->pages = pages;
+	mm->chunk_size = chunk_size;
+	mm->chunk_shift = chunk_shift;
+	mm->max_order = max_order;
+	mm->roots = roots;
+	mm->size = size;
+	mm->n_roots = n_roots;
+
+	root_offset = 0;
+	n_roots = 0;
+
+	for (i = 0; i < mm->size >> chunk_shift; ++i) {
+		struct i915_buddy_block *block = &pages[i];
+		u64 offset = i * chunk_size;
+
+		block->header = offset;
+		mark_poisoned(block);
+		GEM_BUG_ON(i915_buddy_block_order(block));
+		GEM_BUG_ON(i915_buddy_block_offset(block) != offset);
+
+		/*
+		 * Split into power-of-two block(s). This also handles the case
+		 * where we are given a size that is not itself a power-of-two.
+		 * These blocks (or roots) are special in that they have no
+		 * buddy when free. When it comes to destroying the mm, we
+		 * expect all of them to be marked as free, otherwise it means
+		 * someone leaked something.
+		 */
+		if (offset == root_offset) {
+			u64 root_size = rounddown_pow_of_two(size);
+			unsigned int order = ilog2(root_size) - chunk_shift;
+
+			block->header |= order;
+			mark_free(mm, block);
+			GEM_BUG_ON(i915_buddy_block_size(mm, block) != root_size);
+
+			roots[n_roots++] = block;
+			size -= root_size;
+			root_offset += root_size;
+		}
+	}
 
-		offset += root_size;
-		size -= root_size;
-		i++;
-	} while (size);
+	GEM_BUG_ON(n_roots != mm->n_roots);
+	GEM_BUG_ON(size);
 
 	return 0;
 
-out_free_roots:
-	while (i--)
-		i915_block_free(mm->roots[i]);
-	kfree(mm->roots);
-out_free_list:
-	kfree(mm->free_list);
-	return -ENOMEM;
+err_free_list:
+	kfree(free_list);
+err_free_pages:
+	kvfree(pages);
+	return err;
 }
 
 void i915_buddy_fini(struct i915_buddy_mm *mm)
 {
 	int i;
 
-	for (i = 0; i < mm->n_roots; ++i) {
+	for (i = 0; i < mm->n_roots; ++i)
 		GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
-		i915_block_free(mm->roots[i]);
-	}
 
-	kfree(mm->roots);
+	kvfree(mm->pages);
 	kfree(mm->free_list);
 }
 
-static int split_block(struct i915_buddy_mm *mm,
-		       struct i915_buddy_block *block)
+static struct i915_buddy_block *get_buddy(struct i915_buddy_mm *mm,
+					  struct i915_buddy_block *block)
 {
-	unsigned int block_order = i915_buddy_block_order(block) - 1;
-	u64 offset = i915_buddy_block_offset(block);
-
-	GEM_BUG_ON(!i915_buddy_block_is_free(block));
-	GEM_BUG_ON(!i915_buddy_block_order(block));
+	u64 block_size = i915_buddy_block_size(mm, block);
+	u64 buddy_offset = i915_buddy_block_offset(block) ^ block_size;
+	unsigned long idx = buddy_offset >> mm->chunk_shift;
 
-	block->left = i915_block_alloc(block, block_order, offset);
-	if (!block->left)
-		return -ENOMEM;
-
-	block->right = i915_block_alloc(block, block_order,
-					offset + (mm->chunk_size << block_order));
-	if (!block->right) {
-		i915_block_free(block->left);
-		return -ENOMEM;
-	}
-
-	mark_free(mm, block->left);
-	mark_free(mm, block->right);
-
-	mark_split(block);
+	/*
+	 * Check for pesky root node(s) -- we could have multiple if we have a
+	 * non-power-of-two address space.
+	 */
+	if (range_overflows(buddy_offset, block_size, mm->size))
+		return NULL;
 
-	return 0;
+	return &mm->pages[idx];
 }
 
-static struct i915_buddy_block *
-get_buddy(struct i915_buddy_block *block)
+static void split_block(struct i915_buddy_mm *mm,
+			struct i915_buddy_block *block)
 {
-	struct i915_buddy_block *parent;
+	struct i915_buddy_block *buddy;
 
-	parent = block->parent;
-	if (!parent)
-		return NULL;
+	GEM_BUG_ON(!i915_buddy_block_order(block));
+
+	/* Must decrement the order before get_buddy(); the order determines the buddy's offset. */
+	block->header--;
 
-	if (parent->left == block)
-		return parent->right;
+	list_move(&block->link,
+		  &mm->free_list[i915_buddy_block_order(block)]);
 
-	return parent->left;
+	buddy = get_buddy(mm, block);
+	GEM_BUG_ON(i915_buddy_block_state(buddy) != I915_BUDDY_POISONED);
+
+	buddy->header |= i915_buddy_block_order(block);
+	mark_free(mm, buddy);
 }
 
 static void __i915_buddy_free(struct i915_buddy_mm *mm,
 			      struct i915_buddy_block *block)
 {
-	struct i915_buddy_block *parent;
+	struct i915_buddy_block *buddy;
 
-	while ((parent = block->parent)) {
-		struct i915_buddy_block *buddy;
+	while ((buddy = get_buddy(mm, block))) {
+		GEM_BUG_ON(i915_buddy_block_state(buddy) ==
+			   I915_BUDDY_POISONED);
 
-		buddy = get_buddy(block);
+		if (i915_buddy_block_order(block) !=
+		    i915_buddy_block_order(buddy))
+			break;
 
 		if (!i915_buddy_block_is_free(buddy))
 			break;
 
-		list_del(&buddy->link);
-
-		i915_block_free(block);
-		i915_block_free(buddy);
+		/* Always merge into the left-most node */
+		if (i915_buddy_block_offset(buddy) <
+		    i915_buddy_block_offset(block)) {
+			mark_poisoned(block);
+			block = buddy;
+			list_del(&block->link);
+		} else {
+			mark_poisoned(buddy);
+			list_del(&buddy->link);
+		}
 
-		block = parent;
+		block->header++;
+		GEM_BUG_ON(i915_buddy_block_order(block) > mm->max_order);
 	}
 
 	mark_free(mm, block);
@@ -280,7 +255,6 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
 {
 	struct i915_buddy_block *block = NULL;
 	unsigned int i;
-	int err;
 
 	for (i = order; i <= mm->max_order; ++i) {
 		block = list_first_entry_or_null(&mm->free_list[i],
@@ -293,25 +267,11 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
 	if (!block)
 		return ERR_PTR(-ENOSPC);
 
-	GEM_BUG_ON(!i915_buddy_block_is_free(block));
-
-	while (i != order) {
-		err = split_block(mm, block);
-		if (unlikely(err))
-			goto out_free;
-
-		/* Go low */
-		block = block->left;
-		i--;
-	}
+	while (i-- != order)
+		split_block(mm, block);
 
 	mark_allocated(block);
-	kmemleak_update_trace(block);
 	return block;
-
-out_free:
-	__i915_buddy_free(mm, block);
-	return ERR_PTR(err);
 }
 
 static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
@@ -324,6 +284,17 @@ static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
 	return s1 <= s2 && e1 >= e2;
 }
 
+static int sort_blocks(void *priv, struct list_head *A, struct list_head *B)
+{
+	struct i915_buddy_block *a = list_entry(A, typeof(*a), link);
+	struct i915_buddy_block *b = list_entry(B, typeof(*b), link);
+
+	if (i915_buddy_block_offset(a) < i915_buddy_block_offset(b))
+		return -1;
+	else
+		return 1;
+}
+
 /*
  * Allocate range. Note that it's safe to chain together multiple alloc_ranges
  * with the same blocks list.
@@ -338,13 +309,10 @@ int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
 			   struct list_head *blocks,
 			   u64 start, u64 size)
 {
-	struct i915_buddy_block *block;
-	struct i915_buddy_block *buddy;
+	struct i915_buddy_block *block, *next;
 	LIST_HEAD(allocated);
-	LIST_HEAD(dfs);
+	unsigned int order;
 	u64 end;
-	int err;
-	int i;
 
 	if (size < mm->chunk_size)
 		return -EINVAL;
@@ -355,73 +323,42 @@ int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
 	if (range_overflows(start, size, mm->size))
 		return -EINVAL;
 
-	for (i = 0; i < mm->n_roots; ++i)
-		list_add_tail(&mm->roots[i]->tmp_link, &dfs);
-
 	end = start + size - 1;
+	order = mm->max_order;
 
-	do {
-		u64 block_start;
-		u64 block_end;
-
-		block = list_first_entry_or_null(&dfs,
-						 struct i915_buddy_block,
-						 tmp_link);
-		if (!block)
-			break;
-
-		list_del(&block->tmp_link);
-
-		block_start = i915_buddy_block_offset(block);
-		block_end = block_start + i915_buddy_block_size(mm, block) - 1;
-
-		if (!overlaps(start, end, block_start, block_end))
-			continue;
-
-		if (i915_buddy_block_is_allocated(block)) {
-			err = -ENOSPC;
-			goto err_free;
-		}
+	/*
+	 * We go on a bit of an expedition here, but the assumption is that
+	 * alloc_range is only ever intended for pre-allocating stuff at around
+	 * init time, and so we shouldn't expect to have an army of free
+	 * blocks, likewise we don't expect to return an army of blocks.
+	 */
 
-		if (contains(start, end, block_start, block_end)) {
-			if (!i915_buddy_block_is_free(block)) {
-				err = -ENOSPC;
-				goto err_free;
+	do {
+		list_for_each_entry_safe(block, next, &mm->free_list[order], link) {
+			u64 block_size = i915_buddy_block_size(mm, block);
+			u64 block_start = i915_buddy_block_offset(block);
+			u64 block_end = block_start + block_size - 1;
+
+			if (contains(start, end, block_start, block_end)) {
+				mark_allocated(block);
+				list_add_tail(&block->link, &allocated);
+
+				size -= block_size;
+				if (!size)
+					goto found;
+			} else if (overlaps(start, end, block_start, block_end)) {
+				split_block(mm, block);
 			}
-
-			mark_allocated(block);
-			list_add_tail(&block->link, &allocated);
-			continue;
 		}
+	} while (order--);
 
-		if (!i915_buddy_block_is_split(block)) {
-			err = split_block(mm, block);
-			if (unlikely(err))
-				goto err_undo;
-		}
-
-		list_add(&block->right->tmp_link, &dfs);
-		list_add(&block->left->tmp_link, &dfs);
-	} while (1);
+	i915_buddy_free_list(mm, &allocated);
+	return -ENOSPC;
 
+found:
+	list_sort(NULL, &allocated, sort_blocks);
 	list_splice_tail(&allocated, blocks);
 	return 0;
-
-err_undo:
-	/*
-	 * We really don't want to leave around a bunch of split blocks, since
-	 * bigger is better, so make sure we merge everything back before we
-	 * free the allocated blocks.
-	 */
-	buddy = get_buddy(block);
-	if (buddy &&
-	    (i915_buddy_block_is_free(block) &&
-	     i915_buddy_block_is_free(buddy)))
-		__i915_buddy_free(mm, block);
-
-err_free:
-	i915_buddy_free_list(mm, &allocated);
-	return err;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
index ed41f3507cdc..171606918d29 100644
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -9,20 +9,34 @@
 #include <linux/bitops.h>
 #include <linux/list.h>
 
+/*
+ * A block is our fundamental unit for an allocation. Users should access the
+ * header state with the helpers.
+ *
+ * One thing to note is that any returned block from i915_buddy_alloc* is
+ * effectively the head block, which means that if the allocation is larger
+ * than order-zero(see chunk_size), then the user also has ownership of any
+ * sibling blocks. For example:
+ *
+ *	head = i915_buddy_alloc(&mm, order);
+ *	...
+ *	head->private = ...
+ *	...
+ *	for (i = 1; i < 1ULL << order; ++i)
+ *		head[i].private = head;
+ *
+ * Just note that i915_buddy_free* only expects the head block.
+ */
 struct i915_buddy_block {
 #define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
 #define I915_BUDDY_HEADER_STATE  GENMASK_ULL(11, 10)
-#define   I915_BUDDY_ALLOCATED	   (1 << 10)
-#define   I915_BUDDY_FREE	   (2 << 10)
-#define   I915_BUDDY_SPLIT	   (3 << 10)
+#define   I915_BUDDY_ALLOCATED	(1 << 10)
+#define   I915_BUDDY_FREE	(2 << 10)
+#define   I915_BUDDY_POISONED	(3 << 10)
 #define I915_BUDDY_HEADER_ORDER  GENMASK_ULL(9, 0)
 	u64 header;
 
-	struct i915_buddy_block *left;
-	struct i915_buddy_block *right;
-	struct i915_buddy_block *parent;
-
-	void *private; /* owned by creator */
+	void *private; /* owned by creator/user */
 
 	/*
 	 * While the block is allocated by the user through i915_buddy_alloc*,
@@ -31,7 +45,6 @@ struct i915_buddy_block {
 	 * i915_buddy_free* ownership is given back to the mm.
 	 */
 	struct list_head link;
-	struct list_head tmp_link;
 };
 
 #define I915_BUDDY_MAX_ORDER  I915_BUDDY_HEADER_ORDER
@@ -39,7 +52,7 @@ struct i915_buddy_block {
 /*
  * Binary Buddy System.
  *
- * Locking should be handled by the user, a simple mutex around
+ * Locking should be handled by the user, a simple mutex/spin-lock around
  * i915_buddy_alloc* and i915_buddy_free* should suffice.
  */
 struct i915_buddy_mm {
@@ -47,12 +60,15 @@ struct i915_buddy_mm {
 	struct list_head *free_list;
 
 	/*
-	 * Maintain explicit binary tree(s) to track the allocation of the
-	 * address space. This gives us a simple way of finding a buddy block
-	 * and performing the potentially recursive merge step when freeing a
-	 * block.  Nodes are either allocated or free, in which case they will
-	 * also exist on the respective free list.
+	 * Maintain an array to track the allocation of the address space,
+	 * where we have an entry for each chunk_size page. This gives us a
+	 * simple way of finding a buddy block and performing the potentially
+	 * recursive merge step when freeing a block.  Nodes are either
+	 * allocated or free, in which case they will also exist on the
+	 * respective free list.
 	 */
+	struct i915_buddy_block *pages;
+
 	struct i915_buddy_block **roots;
 
 	/*
@@ -62,9 +78,10 @@ struct i915_buddy_mm {
 	unsigned int n_roots;
 	unsigned int max_order;
 
-	/* Must be at least PAGE_SIZE */
-	u64 chunk_size;
 	u64 size;
+	/* Order-zero allocation size. Must be at least PAGE_SIZE. */
+	u64 chunk_size;
+	int chunk_shift;
 };
 
 static inline u64
@@ -97,12 +114,6 @@ i915_buddy_block_is_free(struct i915_buddy_block *block)
 	return i915_buddy_block_state(block) == I915_BUDDY_FREE;
 }
 
-static inline bool
-i915_buddy_block_is_split(struct i915_buddy_block *block)
-{
-	return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
-}
-
 static inline u64
 i915_buddy_block_size(struct i915_buddy_mm *mm,
 		      struct i915_buddy_block *block)
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index be127cd28931..2d5fcba98841 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -62,7 +62,6 @@ static void __i915_globals_cleanup(void)
 
 static __initconst int (* const initfn[])(void) = {
 	i915_global_active_init,
-	i915_global_buddy_init,
 	i915_global_context_init,
 	i915_global_gem_context_init,
 	i915_global_objects_init,
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 1b856bae67b5..adf3895f2c32 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -20,7 +20,7 @@ static void __igt_dump_block(struct i915_buddy_mm *mm,
 	       i915_buddy_block_order(block),
 	       i915_buddy_block_offset(block),
 	       i915_buddy_block_size(mm, block),
-	       yesno(!block->parent),
+	       yesno(!get_buddy(mm, block)),
 	       yesno(buddy));
 }
 
@@ -31,7 +31,7 @@ static void igt_dump_block(struct i915_buddy_mm *mm,
 
 	__igt_dump_block(mm, block, false);
 
-	buddy = get_buddy(block);
+	buddy = get_buddy(mm, block);
 	if (buddy)
 		__igt_dump_block(mm, buddy, true);
 }
@@ -41,6 +41,7 @@ static int igt_check_block(struct i915_buddy_mm *mm,
 {
 	struct i915_buddy_block *buddy;
 	unsigned int block_state;
+	unsigned long i;
 	u64 block_size;
 	u64 offset;
 	int err = 0;
@@ -49,7 +50,7 @@ static int igt_check_block(struct i915_buddy_mm *mm,
 
 	if (block_state != I915_BUDDY_ALLOCATED &&
 	    block_state != I915_BUDDY_FREE &&
-	    block_state != I915_BUDDY_SPLIT) {
+	    block_state != I915_BUDDY_POISONED) {
 		pr_err("block state mismatch\n");
 		err = -EINVAL;
 	}
@@ -82,21 +83,35 @@ static int igt_check_block(struct i915_buddy_mm *mm,
 		err = -EINVAL;
 	}
 
-	buddy = get_buddy(block);
+	for (i = 1; i < block_size >> mm->chunk_shift; ++i) {
+		u64 expected_offset = offset + i * mm->chunk_size;
+		struct i915_buddy_block *sub_block = &block[i];
 
-	if (!buddy && block->parent) {
-		pr_err("buddy has gone fishing\n");
-		err = -EINVAL;
+		if (i915_buddy_block_order(sub_block)) {
+			pr_err("sub_block is not order-zero\n");
+			err = -EINVAL;
+		}
+
+		if (i915_buddy_block_offset(sub_block) != expected_offset) {
+			pr_err("sub_block offset mismatch\n");
+			err = -EINVAL;
+		}
+
+		if (i915_buddy_block_state(sub_block) != I915_BUDDY_POISONED) {
+			pr_err("sub_block state mismatch\n");
+			err = -EINVAL;
+		}
 	}
 
+	buddy = get_buddy(mm, block);
 	if (buddy) {
 		if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
 			pr_err("buddy has wrong offset\n");
 			err = -EINVAL;
 		}
 
-		if (i915_buddy_block_size(mm, buddy) != block_size) {
-			pr_err("buddy size mismatch\n");
+		if (i915_buddy_block_state(buddy) == I915_BUDDY_POISONED) {
+			pr_err("buddy is poisoned\n");
 			err = -EINVAL;
 		}
 
@@ -210,6 +225,11 @@ static int igt_check_mm(struct i915_buddy_mm *mm)
 			break;
 		}
 
+		if (get_buddy(mm, root)) {
+			pr_err("root(%u) has buddy\n", i);
+			err = -EINVAL;
+		}
+
 		err = igt_check_block(mm, root);
 
 		if (!i915_buddy_block_is_free(root)) {
-- 
2.20.1



More information about the Intel-gfx-trybot mailing list