[PATCH v1 3/3] drm/buddy: don't go over the higher orders multiple times

Pierre-Eric Pelloux-Prayer pierre-eric.pelloux-prayer at amd.com
Wed Jul 2 16:12:04 UTC 2025


AFAICT the rationale for the loop is to:
1) try to allocate from the preferred order
2) if it fails, try higher orders (order + 1 -> max order)
3) if it fails, try smaller orders (order - 1 -> min order)

Steps 1 and 2 are covered by the loop going through [order, max_order].
Currently step 3 re-scans the whole range [order, max_order] for each
decreasing value of order, needlessly revisiting the higher orders that
have already been tried and failed.

This is wasteful, so cap the scan at the current order when retrying
with smaller orders, evaluating each order only once.

Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer at amd.com>
---
 drivers/gpu/drm/drm_buddy.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index fd31322b3d41..9d3723f2cff9 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -590,13 +590,14 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm,
 
 static struct drm_buddy_block *
 get_maxblock(struct drm_buddy *mm, unsigned int order,
+	     unsigned int max_order,
 	     unsigned long flags)
 {
 	struct drm_buddy_block *max_block = NULL, *block = NULL;
 	bool wants_clear;
 	unsigned int i;
 
-	for (i = order; i <= mm->max_order; ++i) {
+	for (i = order; i <= max_order; ++i) {
 		struct drm_buddy_block *tmp_block;
 
 		wants_clear = flags & DRM_BUDDY_PREFER_CLEAR_ALLOCATION;
@@ -635,6 +636,7 @@ get_maxblock(struct drm_buddy *mm, unsigned int order,
 static struct drm_buddy_block *
 alloc_from_freelist(struct drm_buddy *mm,
 		    unsigned int order,
+		    unsigned int max_order,
 		    unsigned long flags)
 {
 	struct drm_buddy_block *block = NULL;
@@ -643,12 +645,12 @@ alloc_from_freelist(struct drm_buddy *mm,
 	int err;
 
 	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
-		block = get_maxblock(mm, order, flags);
+		block = get_maxblock(mm, order, max_order, flags);
 		if (block)
 			/* Store the obtained block order */
 			tmp = drm_buddy_block_order(block);
 	} else {
-		for (tmp = order; tmp <= mm->max_order; ++tmp) {
+		for (tmp = order; tmp <= max_order; ++tmp) {
 			struct drm_buddy_block *tmp_block;
 			wants_clear = flags & DRM_BUDDY_PREFER_CLEAR_ALLOCATION;
 
@@ -956,6 +958,7 @@ static struct drm_buddy_block *
 __drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			 u64 start, u64 end,
 			 unsigned int order,
+			 unsigned int max_order,
 			 unsigned long flags)
 {
 	if (flags & DRM_BUDDY_RANGE_ALLOCATION)
@@ -964,7 +967,7 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
 						     order, flags);
 	else
 		/* Allocate from freelist */
-		return alloc_from_freelist(mm, order, flags);
+		return alloc_from_freelist(mm, order, max_order, flags);
 }
 
 /**
@@ -995,7 +998,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 {
 	struct drm_buddy_block *block = NULL;
 	u64 original_size, original_min_size;
-	unsigned int min_order, order;
+	unsigned int min_order, max_order, order;
 	LIST_HEAD(allocated);
 	unsigned long pages;
 	int err;
@@ -1044,6 +1047,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 
 	do {
 		order = min(order, (unsigned int)fls(pages) - 1);
+		max_order = mm->max_order;
 		BUG_ON(order > mm->max_order);
 		BUG_ON(order < min_order);
 
@@ -1051,6 +1055,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			block = __drm_buddy_alloc_blocks(mm, start,
 							 end,
 							 order,
+							 max_order,
 							 flags);
 			if (!IS_ERR(block))
 				break;
@@ -1062,6 +1067,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 					block = __drm_buddy_alloc_blocks(mm, start,
 									 end,
 									 min_order,
+									 mm->max_order,
 									 flags);
 					if (!IS_ERR(block)) {
 						order = min_order;
@@ -1082,6 +1088,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 				err = -ENOSPC;
 				goto err_free;
 			}
+			max_order = order;
 		} while (1);
 
 		mark_allocated(block);
-- 
2.43.0



More information about the dri-devel mailing list