[PATCH 091/131] drm/mm: Replace the linear free-hole search with rbtrees

Chris Wilson <chris@chris-wilson.co.uk>
Sat Aug 6 07:36:58 UTC 2016
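
Replace the linear walk of the hole_stack list in
drm_mm_search_free_generic() and its _in_range variant with two rbtrees
kept per drm_mm: one sorted by hole size for best-fit lookups, one
sorted by hole address for bottom-up and top-down placement. The
separate search-then-allocate steps are folded into a single loop over
candidate holes inside drm_mm_insert_node_in_range_generic().

The DRM_MM_SEARCH_* / DRM_MM_CREATE_* flag pairs collapse into a single
insertion mode (DRM_MM_INSERT_BEST, _LOW, _HIGH or _EVICT), and the
eviction-scan state moves out of struct drm_mm into a struct
drm_mm_scan that callers keep on the stack.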


---
 drivers/gpu/drm/drm_mm.c                   | 685 ++++++++++++-----------------
 drivers/gpu/drm/drm_vma_manager.c          |   3 +-
 drivers/gpu/drm/i915/i915_gem.c            |  25 +-
 drivers/gpu/drm/i915/i915_gem_evict.c      |  23 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   3 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   4 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c     |   3 +-
 drivers/gpu/drm/sis/sis_mm.c               |   6 +-
 drivers/gpu/drm/ttm/ttm_bo_manager.c       |  15 +-
 drivers/gpu/drm/via/via_mm.c               |   4 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c     |  10 +-
 include/drm/drm_mm.h                       | 181 +++++---
 12 files changed, 427 insertions(+), 535 deletions(-)
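
For reviewers, a minimal before/after sketch of a call-site conversion
(mm, node, size, alignment, start and end are placeholder names; the
flags are the ones removed and introduced by this patch):

	/* Before: separate search and allocator flag enums. */
	err = drm_mm_insert_node(&mm, &node, size, alignment,
				 DRM_MM_SEARCH_DEFAULT);

	/* After: plain insertion takes only a size; placement policy is
	 * a single DRM_MM_INSERT_* mode on the range-based entry point.
	 */
	err = drm_mm_insert_node(&mm, &node, size);
	err = drm_mm_insert_node_in_range_generic(&mm, &node, size,
						  alignment, 0 /* color */,
						  start, end,
						  DRM_MM_INSERT_HIGH);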

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 9f2c86cc662e..08cd46d94aa8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -91,19 +91,6 @@
  * some basic allocator dumpers for debugging.
  */
 
-static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-						u64 size,
-						u64 alignment,
-						unsigned long color,
-						enum drm_mm_search_flags flags);
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-						u64 size,
-						u64 alignment,
-						unsigned long color,
-						u64 start,
-						u64 end,
-						enum drm_mm_search_flags flags);
-
 #define START(node) ((node)->start)
 #define LAST(node)  ((node)->start + (node)->size - 1)
 
@@ -170,63 +157,45 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 			    &drm_mm_interval_tree_augment);
 }
 
-static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
-				 struct drm_mm_node *node,
-				 u64 size, u64 alignment,
-				 unsigned long color,
-				 enum drm_mm_allocator_flags flags)
+#define RB_INSERT(root, member, expr) do { \
+	struct rb_node **link = &root.rb_node, *rb = NULL; \
+	u64 x = expr(node); \
+	while (*link) { \
+		rb = *link; \
+		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
+			link = &rb->rb_left; \
+		else \
+			link = &rb->rb_right; \
+	} \
+	rb_link_node(&node->member, rb, link); \
+	rb_insert_color(&node->member, &root); \
+} while (0)
+
+#define HOLE_SIZE(NODE) ((NODE)->hole_size)
+#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
+
+static void add_hole(struct drm_mm_node *node)
 {
-	struct drm_mm *mm = hole_node->mm;
-	u64 hole_start = drm_mm_hole_node_start(hole_node);
-	u64 hole_end = drm_mm_hole_node_end(hole_node);
-	u64 adj_start = hole_start;
-	u64 adj_end = hole_end;
-
-	BUG_ON(node->allocated);
-
-	if (mm->color_adjust)
-		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
-	if (flags & DRM_MM_CREATE_TOP)
-		adj_start = adj_end - size;
-
-	if (alignment) {
-		u64 rem;
-
-		div64_u64_rem(adj_start, alignment, &rem);
-		if (rem) {
-			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= rem;
-			else
-				adj_start += alignment - rem;
-		}
-	}
-
-	BUG_ON(adj_start < hole_start);
-	BUG_ON(adj_end > hole_end);
-
-	if (adj_start == hole_start) {
-		hole_node->hole_follows = 0;
-		list_del(&hole_node->hole_stack);
-	}
+	struct drm_mm *mm = node->mm;
 
-	node->start = adj_start;
-	node->size = size;
-	node->mm = mm;
-	node->color = color;
-	node->allocated = 1;
+	node->hole_size =
+		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
 
-	list_add(&node->node_list, &hole_node->node_list);
+	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
-	drm_mm_interval_tree_add_node(hole_node, node);
+	list_add(&node->hole_stack, &mm->hole_stack);
+}
 
-	BUG_ON(node->start + node->size > adj_end);
+static void rm_hole(struct drm_mm_node *node)
+{
+	if (!node->hole_size)
+		return;
 
-	node->hole_follows = 0;
-	if (__drm_mm_hole_node_start(node) < hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
-	}
+	list_del(&node->hole_stack);
+	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+	node->hole_size = 0;
 }
 
 /**
@@ -266,11 +235,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 	}
 
 	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
-	if (!hole->hole_follows)
+	if (!hole->hole_size)
 		return -ENOSPC;
 
 	hole_start = __drm_mm_hole_node_start(hole);
-	hole_end = __drm_mm_hole_node_end(hole);
+	hole_end = hole_start + hole->hole_size;
 	if (hole_start > node->start || hole_end < end)
 		return -ENOSPC;
 
@@ -284,125 +253,112 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 	}
 
 	node->mm = mm;
-	node->allocated = 1;
 
 	list_add(&node->node_list, &hole->node_list);
-
 	drm_mm_interval_tree_add_node(hole, node);
+	node->allocated = 1;
+	node->hole_size = 0;
 
-	if (node->start == hole_start) {
-		hole->hole_follows = 0;
-		list_del(&hole->hole_stack);
-	}
-
-	node->hole_follows = 0;
-	if (end != hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
-	}
+	rm_hole(hole);
+	if (node->start > hole_start)
+		add_hole(hole);
+	if (end < hole_end)
+		add_hole(node);
 
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
-/**
- * drm_mm_insert_node_generic - search for space and insert @node
- * @mm: drm_mm to allocate from
- * @node: preallocate node to insert
- * @size: size of the allocation
- * @alignment: alignment of the allocation
- * @color: opaque tag value to use for this node
- * @sflags: flags to fine-tune the allocation search
- * @aflags: flags to fine-tune the allocation behavior
- *
- * The preallocated node must be cleared to 0.
- *
- * Returns:
- * 0 on success, -ENOSPC if there's no suitable hole.
- */
-int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-			       u64 size, u64 alignment,
-			       unsigned long color,
-			       enum drm_mm_search_flags sflags,
-			       enum drm_mm_allocator_flags aflags)
+static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
 {
-	struct drm_mm_node *hole_node;
-
-	if (WARN_ON(size == 0))
-		return -EINVAL;
-
-	hole_node = drm_mm_search_free_generic(mm, size, alignment,
-					       color, sflags);
-	if (!hole_node)
-		return -ENOSPC;
-
-	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
-	return 0;
+	return rb ? rb_entry(rb, struct drm_mm_node, rb_hole_size) : NULL;
 }
-EXPORT_SYMBOL(drm_mm_insert_node_generic);
-
-static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
-				       struct drm_mm_node *node,
-				       u64 size, u64 alignment,
-				       unsigned long color,
-				       u64 start, u64 end,
-				       enum drm_mm_allocator_flags flags)
-{
-	struct drm_mm *mm = hole_node->mm;
-	u64 hole_start = drm_mm_hole_node_start(hole_node);
-	u64 hole_end = drm_mm_hole_node_end(hole_node);
-	u64 adj_start = hole_start;
-	u64 adj_end = hole_end;
-
-	BUG_ON(!hole_node->hole_follows || node->allocated);
 
-	if (adj_start < start)
-		adj_start = start;
-	if (adj_end > end)
-		adj_end = end;
+static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
+{
+	return rb ? rb_entry(rb, struct drm_mm_node, rb_hole_addr) : NULL;
+}
 
-	if (mm->color_adjust)
-		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+static inline u64 rb_hole_size(struct rb_node *rb)
+{
+	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
 
-	if (flags & DRM_MM_CREATE_TOP)
-		adj_start = adj_end - size;
+static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
+{
+	struct rb_node *best = NULL;
+	struct rb_node **link = &mm->holes_size.rb_node;
+	while (*link) {
+		struct rb_node *rb = *link;
+		if (size <= rb_hole_size(rb)) {
+			best = rb;
+			link = &rb->rb_left;
+		} else
+			link = &rb->rb_right;
+	}
+	return rb_hole_size_to_node(best);
+}
 
-	if (alignment) {
-		u64 rem;
+static struct drm_mm_node *low_hole(struct drm_mm *mm, u64 addr)
+{
+	struct drm_mm_node *node = NULL;
+	struct rb_node **link = &mm->holes_addr.rb_node;
+	while (*link) {
+		node = rb_hole_addr_to_node(*link);
+		if (addr == __drm_mm_hole_node_start(node))
+			return node;
 
-		div64_u64_rem(adj_start, alignment, &rem);
-		if (rem) {
-			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= rem;
-			else
-				adj_start += alignment - rem;
-		}
+		if (addr < __drm_mm_hole_node_start(node))
+			link = &node->rb_hole_addr.rb_left;
+		else
+			link = &node->rb_hole_addr.rb_right;
 	}
+	return node;
+}
 
-	if (adj_start == hole_start) {
-		hole_node->hole_follows = 0;
-		list_del(&hole_node->hole_stack);
-	}
+static struct drm_mm_node *
+first_hole(struct drm_mm *mm, u64 start, u64 end, u64 size, unsigned flags)
+{
+	if (RB_EMPTY_ROOT(&mm->holes_size))
+		return NULL;
 
-	node->start = adj_start;
-	node->size = size;
-	node->mm = mm;
-	node->color = color;
-	node->allocated = 1;
+	switch (flags) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return best_hole(mm, size);
 
-	list_add(&node->node_list, &hole_node->node_list);
+	case DRM_MM_INSERT_LOW:
+		return low_hole(mm, start);
 
-	drm_mm_interval_tree_add_node(hole_node, node);
+	case DRM_MM_INSERT_HIGH:
+		return rb_hole_addr_to_node(rb_last(&mm->holes_addr));
 
-	BUG_ON(node->start < start);
-	BUG_ON(node->start < adj_start);
-	BUG_ON(node->start + node->size > adj_end);
-	BUG_ON(node->start + node->size > end);
+	case DRM_MM_INSERT_EVICT:
+		return list_first_entry_or_null(&mm->hole_stack,
+						struct drm_mm_node,
+						hole_stack);
+	}
+}
 
-	node->hole_follows = 0;
-	if (__drm_mm_hole_node_start(node) < hole_end) {
-		list_add(&node->hole_stack, &mm->hole_stack);
-		node->hole_follows = 1;
+static struct drm_mm_node *
+next_hole(struct drm_mm *mm, struct drm_mm_node *node, unsigned flags)
+{
+	switch (flags) {
+	default:
+	case DRM_MM_INSERT_BEST:
+		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+
+	case DRM_MM_INSERT_LOW:
+		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+
+	case DRM_MM_INSERT_HIGH:
+		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+
+	case DRM_MM_INSERT_EVICT:
+		node = list_entry(node->hole_stack.next,
+				  struct drm_mm_node,
+				  hole_stack);
+		return &node->hole_stack == &mm->hole_stack ? NULL : node;
 	}
 }
 
@@ -423,28 +379,93 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
  * Returns:
  * 0 on success, -ENOSPC if there's no suitable hole.
  */
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+int drm_mm_insert_node_in_range_generic(struct drm_mm * const mm,
+					struct drm_mm_node * const node,
 					u64 size, u64 alignment,
 					unsigned long color,
 					u64 start, u64 end,
-					enum drm_mm_search_flags sflags,
-					enum drm_mm_allocator_flags aflags)
+					unsigned flags)
 {
-	struct drm_mm_node *hole_node;
+	struct drm_mm_node *entry;
+	u64 alignment_mask;
 
 	if (WARN_ON(size == 0))
 		return -EINVAL;
 
-	hole_node = drm_mm_search_free_in_range_generic(mm,
-							size, alignment, color,
-							start, end, sflags);
-	if (!hole_node)
+	if (end - start < size)
 		return -ENOSPC;
 
-	drm_mm_insert_helper_range(hole_node, node,
-				   size, alignment, color,
-				   start, end, aflags);
-	return 0;
+	alignment_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
+	for (entry = first_hole(mm, start, end, size, flags); entry;
+	     entry = next_hole(mm, entry, flags)) {
+		u64 hole_start = __drm_mm_hole_node_start(entry);
+		u64 hole_end = hole_start + entry->hole_size;
+		u64 adj_start = hole_start;
+		u64 adj_end = hole_end;
+
+		if (flags == DRM_MM_INSERT_LOW && hole_start >= end)
+			break;
+
+		if (flags == DRM_MM_INSERT_HIGH && hole_end <= start)
+			break;
+
+		if (mm->color_adjust)
+			mm->color_adjust(entry, color, &adj_start, &adj_end);
+
+		if (adj_start < start)
+			adj_start = start;
+		if (adj_end > end)
+			adj_end = end;
+		if (adj_end <= adj_start || adj_end - adj_start < size)
+			continue;
+
+		if (flags == DRM_MM_INSERT_HIGH)
+			adj_start = adj_end - size;
+
+		if (alignment) {
+			u64 rem;
+
+			if (alignment_mask)
+				rem = adj_start & alignment_mask;
+			else
+				div64_u64_rem(adj_start, alignment, &rem);
+			if (rem) {
+				adj_start -= rem;
+				if (flags != DRM_MM_INSERT_HIGH)
+					adj_start += alignment;
+				if (adj_start < hole_start ||
+				    adj_end <= adj_start ||
+				    adj_end - adj_start < size) {
+					/* Searching for alignment may cause
+					 * many many misses and trigger the
+					 * NMI watchdog if we are not careful.
+					 */
+					cond_resched();
+					continue;
+				}
+			}
+		}
+
+		node->mm = mm;
+		node->size = size;
+		node->start = adj_start;
+		node->color = color;
+		node->hole_size = 0;
+
+		list_add(&node->node_list, &entry->node_list);
+		drm_mm_interval_tree_add_node(entry, node);
+		node->allocated = 1;
+
+		rm_hole(entry);
+		if (adj_start > hole_start)
+			add_hole(entry);
+		if (adj_start + size < hole_end)
+			add_hole(node);
+
+		return 0;
+	}
+
+	return -ENOSPC;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
@@ -464,139 +485,21 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	if (WARN_ON(!node->allocated))
 		return;
 
-	BUG_ON(node->scanned_block || node->scanned_prev_free
-				   || node->scanned_next_free);
+	BUG_ON(node->scanned_block);
 
 	prev_node =
 	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
-	if (node->hole_follows) {
-		BUG_ON(__drm_mm_hole_node_start(node) ==
-		       __drm_mm_hole_node_end(node));
-		list_del(&node->hole_stack);
-	} else
-		BUG_ON(__drm_mm_hole_node_start(node) !=
-		       __drm_mm_hole_node_end(node));
-
-
-	if (!prev_node->hole_follows) {
-		prev_node->hole_follows = 1;
-		list_add(&prev_node->hole_stack, &mm->hole_stack);
-	} else
-		list_move(&prev_node->hole_stack, &mm->hole_stack);
+	rm_hole(node);
 
 	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
 	node->allocated = 0;
-}
-EXPORT_SYMBOL(drm_mm_remove_node);
-
-static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
-{
-	if (end - start < size)
-		return 0;
-
-	if (alignment) {
-		u64 rem;
-
-		div64_u64_rem(start, alignment, &rem);
-		if (rem)
-			start += alignment - rem;
-	}
-
-	return end >= start + size;
-}
-
-static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-						      u64 size,
-						      u64 alignment,
-						      unsigned long color,
-						      enum drm_mm_search_flags flags)
-{
-	struct drm_mm_node *entry;
-	struct drm_mm_node *best;
-	u64 adj_start;
-	u64 adj_end;
-	u64 best_size;
-
-	BUG_ON(mm->scanned_blocks);
-
-	best = NULL;
-	best_size = ~0UL;
-
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
-
-		if (mm->color_adjust) {
-			mm->color_adjust(entry, color, &adj_start, &adj_end);
-			if (adj_end <= adj_start)
-				continue;
-		}
-
-		if (!check_free_hole(adj_start, adj_end, size, alignment))
-			continue;
-
-		if (!(flags & DRM_MM_SEARCH_BEST))
-			return entry;
-
-		if (hole_size < best_size) {
-			best = entry;
-			best_size = hole_size;
-		}
-	}
-
-	return best;
-}
-
-static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-							u64 size,
-							u64 alignment,
-							unsigned long color,
-							u64 start,
-							u64 end,
-							enum drm_mm_search_flags flags)
-{
-	struct drm_mm_node *entry;
-	struct drm_mm_node *best;
-	u64 adj_start;
-	u64 adj_end;
-	u64 best_size;
-
-	BUG_ON(mm->scanned_blocks);
-
-	best = NULL;
-	best_size = ~0UL;
-
-	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
-			       flags & DRM_MM_SEARCH_BELOW) {
-		u64 hole_size = adj_end - adj_start;
-
-		if (adj_start < start)
-			adj_start = start;
-		if (adj_end > end)
-			adj_end = end;
-
-		if (mm->color_adjust) {
-			mm->color_adjust(entry, color, &adj_start, &adj_end);
-			if (adj_end <= adj_start)
-				continue;
-		}
-
-		if (!check_free_hole(adj_start, adj_end, size, alignment))
-			continue;
-
-		if (!(flags & DRM_MM_SEARCH_BEST))
-			return entry;
-
-		if (hole_size < best_size) {
-			best = entry;
-			best_size = hole_size;
-		}
-	}
 
-	return best;
+	rm_hole(prev_node);
+	add_hole(prev_node);
 }
+EXPORT_SYMBOL(drm_mm_remove_node);
 
 /**
  * drm_mm_replace_node - move an allocation from @old to @new
@@ -609,15 +512,20 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
  */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
+	memcpy(new, old, sizeof(struct drm_mm_node));
+
 	list_replace(&old->node_list, &new->node_list);
-	list_replace(&old->hole_stack, &new->hole_stack);
 	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
-	new->hole_follows = old->hole_follows;
-	new->mm = old->mm;
-	new->start = old->start;
-	new->size = old->size;
-	new->color = old->color;
-	new->__subtree_last = old->__subtree_last;
+
+	if (old->hole_size) {
+		list_replace(&old->hole_stack, &new->hole_stack);
+		rb_replace_node(&old->rb_hole_size,
+				&new->rb_hole_size,
+				&old->mm->holes_size);
+		rb_replace_node(&old->rb_hole_addr,
+				&new->rb_hole_addr,
+				&old->mm->holes_addr);
+	}
 
 	old->allocated = 0;
 	new->allocated = 1;
@@ -653,37 +561,6 @@ EXPORT_SYMBOL(drm_mm_replace_node);
  */
 
 /**
- * drm_mm_init_scan - initialize lru scanning
- * @mm: drm_mm to scan
- * @size: size of the allocation
- * @alignment: alignment of the allocation
- * @color: opaque tag value to use for the allocation
- *
- * This simply sets up the scanning routines with the parameters for the desired
- * hole. Note that there's no need to specify allocation flags, since they only
- * change the place a node is allocated from within a suitable hole.
- *
- * Warning:
- * As long as the scan list is non-empty, no other operations than
- * adding/removing nodes to/from the scan list are allowed.
- */
-void drm_mm_init_scan(struct drm_mm *mm,
-		      u64 size,
-		      u64 alignment,
-		      unsigned long color)
-{
-	mm->scan_color = color;
-	mm->scan_alignment = alignment;
-	mm->scan_size = size;
-	mm->scanned_blocks = 0;
-	mm->scan_hit_start = 0;
-	mm->scan_hit_end = 0;
-	mm->scan_check_range = 0;
-	mm->prev_scanned_node = NULL;
-}
-EXPORT_SYMBOL(drm_mm_init_scan);
-
-/**
  * drm_mm_init_scan - initialize range-restricted lru scanning
  * @mm: drm_mm to scan
  * @size: size of the allocation
@@ -700,23 +577,25 @@ EXPORT_SYMBOL(drm_mm_init_scan);
  * As long as the scan list is non-empty, no other operations than
  * adding/removing nodes to/from the scan list are allowed.
  */
-void drm_mm_init_scan_with_range(struct drm_mm *mm,
+void drm_mm_init_scan_with_range(struct drm_mm_scan *scan,
 				 u64 size,
 				 u64 alignment,
 				 unsigned long color,
 				 u64 start,
-				 u64 end)
+				 u64 end,
+				 unsigned flags)
 {
-	mm->scan_color = color;
-	mm->scan_alignment = alignment;
-	mm->scan_size = size;
-	mm->scanned_blocks = 0;
-	mm->scan_hit_start = 0;
-	mm->scan_hit_end = 0;
-	mm->scan_start = start;
-	mm->scan_end = end;
-	mm->scan_check_range = 1;
-	mm->prev_scanned_node = NULL;
+	scan->flags = flags;
+	scan->color = color;
+	scan->alignment = alignment;
+	scan->alignment_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
+	scan->size = size;
+	scan->hit_start = 0;
+	scan->hit_end = 0;
+	scan->start = start;
+	scan->end = end;
+
+	scan->prev_node = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 
@@ -730,46 +609,57 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
  * Returns:
  * True if a hole has been found, false otherwise.
  */
-bool drm_mm_scan_add_block(struct drm_mm_node *node)
+bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
+			   struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
-	u64 hole_start, hole_end;
 	u64 adj_start, adj_end;
 
-	mm->scanned_blocks++;
-
 	BUG_ON(node->scanned_block);
 	node->scanned_block = 1;
 
 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
 			       node_list);
 
-	node->scanned_preceeds_hole = prev_node->hole_follows;
-	prev_node->hole_follows = 1;
-	list_del(&node->node_list);
+	__list_del_entry(&node->node_list);
 	node->node_list.prev = &prev_node->node_list;
-	node->node_list.next = &mm->prev_scanned_node->node_list;
-	mm->prev_scanned_node = node;
-
-	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
-	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
+	node->node_list.next = &scan->prev_node->node_list;
+	scan->prev_node = node;
 
-	if (mm->scan_check_range) {
-		if (adj_start < mm->scan_start)
-			adj_start = mm->scan_start;
-		if (adj_end > mm->scan_end)
-			adj_end = mm->scan_end;
-	}
+	adj_start = __drm_mm_hole_node_start(prev_node);
+	adj_end = __drm_mm_hole_node_end(prev_node);
 
 	if (mm->color_adjust)
-		mm->color_adjust(prev_node, mm->scan_color,
-				 &adj_start, &adj_end);
+		mm->color_adjust(prev_node, scan->color, &adj_start, &adj_end);
 
-	if (check_free_hole(adj_start, adj_end,
-			    mm->scan_size, mm->scan_alignment)) {
-		mm->scan_hit_start = hole_start;
-		mm->scan_hit_end = hole_end;
+	if (adj_start < scan->start)
+		adj_start = scan->start;
+	if (adj_end > scan->end)
+		adj_end = scan->end;
+
+	if (scan->flags == DRM_MM_INSERT_HIGH)
+		adj_start = adj_end - scan->size;
+
+	if (scan->alignment) {
+		u64 rem;
+
+		if (scan->alignment_mask)
+			rem = adj_start & scan->alignment_mask;
+		else
+			div64_u64_rem(adj_start, scan->alignment, &rem);
+		if (rem) {
+			adj_start -= rem;
+			if (scan->flags != DRM_MM_INSERT_HIGH)
+				adj_start += scan->alignment;
+			if (adj_start < __drm_mm_hole_node_start(prev_node))
+				return false;
+		}
+	}
+
+	if (adj_end > adj_start && adj_end - adj_start >= scan->size) {
+		scan->hit_start = adj_start;
+		scan->hit_end = adj_end;
 		return true;
 	}
 
@@ -793,44 +683,25 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
  * True if this block should be evicted, false otherwise. Will always
  * return false when no hole has been found.
  */
-bool drm_mm_scan_remove_block(struct drm_mm_node *node)
+bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
+			      struct drm_mm_node *node)
 {
-	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 
-	mm->scanned_blocks--;
-
 	BUG_ON(!node->scanned_block);
 	node->scanned_block = 0;
 
 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
 			       node_list);
 
-	prev_node->hole_follows = node->scanned_preceeds_hole;
 	list_add(&node->node_list, &prev_node->node_list);
 
-	 return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
-		 node->start < mm->scan_hit_end);
+	return (node->start + node->size > scan->hit_start &&
+		node->start < scan->hit_end);
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
 /**
- * drm_mm_clean - checks whether an allocator is clean
- * @mm: drm_mm allocator to check
- *
- * Returns:
- * True if the allocator is completely free, false if there's still a node
- * allocated in it.
- */
-bool drm_mm_clean(struct drm_mm * mm)
-{
-	struct list_head *head = &mm->head_node.node_list;
-
-	return (head->next->next == head);
-}
-EXPORT_SYMBOL(drm_mm_clean);
-
-/**
  * drm_mm_init - initialize a drm-mm allocator
  * @mm: the drm_mm structure to initialize
  * @start: start of the range managed by @mm
@@ -840,23 +711,20 @@ EXPORT_SYMBOL(drm_mm_clean);
  */
 void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 {
+	mm->color_adjust = NULL;
+
 	INIT_LIST_HEAD(&mm->hole_stack);
-	mm->scanned_blocks = 0;
+	mm->interval_tree = RB_ROOT;
+	mm->holes_size = RB_ROOT;
+	mm->holes_addr = RB_ROOT;
 
 	/* Clever trick to avoid a special case in the free hole tracking. */
 	INIT_LIST_HEAD(&mm->head_node.node_list);
-	mm->head_node.hole_follows = 1;
-	mm->head_node.scanned_block = 0;
-	mm->head_node.scanned_prev_free = 0;
-	mm->head_node.scanned_next_free = 0;
+	mm->head_node.scanned_block = 1;
 	mm->head_node.mm = mm;
 	mm->head_node.start = start + size;
-	mm->head_node.size = start - mm->head_node.start;
-	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
-
-	mm->interval_tree = RB_ROOT;
-
-	mm->color_adjust = NULL;
+	mm->head_node.size = -size;
+	add_hole(&mm->head_node);
 }
 EXPORT_SYMBOL(drm_mm_init);
 
@@ -869,8 +737,7 @@ EXPORT_SYMBOL(drm_mm_init);
  */
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	WARN(!list_empty(&mm->head_node.node_list),
-	     "Memory manager not clean during takedown.\n");
+	WARN(!drm_mm_clean(mm), "Memory manager not clean during takedown.\n");
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
@@ -879,7 +746,7 @@ static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
 {
 	u64 hole_start, hole_end, hole_size;
 
-	if (entry->hole_follows) {
+	if (entry->hole_size) {
 		hole_start = drm_mm_hole_node_start(entry);
 		hole_end = drm_mm_hole_node_end(entry);
 		hole_size = hole_end - hole_start;
@@ -921,7 +788,7 @@ static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
 	u64 hole_start, hole_end, hole_size;
 
-	if (entry->hole_follows) {
+	if (entry->hole_size) {
 		hole_start = drm_mm_hole_node_start(entry);
 		hole_end = drm_mm_hole_node_end(entry);
 		hole_size = hole_end - hole_start;
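
A self-contained sketch of exercising the reworked drm_mm.c above, e.g.
from a unit test (range and sizes are arbitrary, error handling
abbreviated):

	static void example(void)
	{
		struct drm_mm mm;
		struct drm_mm_node node = {};

		drm_mm_init(&mm, 0x1000, 0x100000); /* [0x1000, 0x101000) */

		/* DRM_MM_INSERT_BEST picks the smallest fitting hole via
		 * the holes_size tree; DRM_MM_INSERT_HIGH packs downwards
		 * from the top of the range via the holes_addr tree.
		 */
		if (drm_mm_insert_node_in_range_generic(&mm, &node,
							0x2000, 0x1000, 0,
							0x1000, 0x101000,
							DRM_MM_INSERT_HIGH) == 0)
			drm_mm_remove_node(&node); /* -ENOSPC otherwise */

		drm_mm_takedown(&mm);
	}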
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 0aef432679f9..31a1f8294441 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -213,8 +213,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 		goto out_unlock;
 	}
 
-	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
-				 pages, 0, DRM_MM_SEARCH_DEFAULT);
+	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
 	if (ret)
 		goto out_unlock;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89092994d5bf..2bb9ee67d588 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -70,8 +70,7 @@ insert_mappable_node(struct drm_i915_private *i915,
 	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
 						   size, 0, -1,
 						   0, i915->ggtt.mappable_end,
-						   DRM_MM_SEARCH_DEFAULT,
-						   DRM_MM_CREATE_DEFAULT);
+						   DRM_MM_INSERT_LOW);
 }
 
 static void
@@ -2898,11 +2897,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 		return true;
 
 	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
-	if (other->allocated && !other->hole_follows && other->color != cache_level)
+	if (other->allocated && !other->hole_size && other->color != cache_level)
 		return false;
 
 	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
-	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+	if (other->allocated && !gtt_space->hole_size && other->color != cache_level)
 		return false;
 
 	return true;
@@ -2991,15 +2990,11 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 				goto err_unpin;
 		}
 	} else {
-		u32 search_flag, alloc_flag;
+		unsigned mmflags;
 
-		if (flags & PIN_HIGH) {
-			search_flag = DRM_MM_SEARCH_BELOW;
-			alloc_flag = DRM_MM_CREATE_TOP;
-		} else {
-			search_flag = DRM_MM_SEARCH_DEFAULT;
-			alloc_flag = DRM_MM_CREATE_DEFAULT;
-		}
+		mmflags = 0;
+		if (flags & PIN_HIGH)
+			mmflags = DRM_MM_INSERT_HIGH;
 
 		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
 		 * so we know that we always have a minimum alignment of 4096.
@@ -3016,8 +3011,7 @@ search_free:
 							  size, alignment,
 							  obj->cache_level,
 							  start, end,
-							  search_flag,
-							  alloc_flag);
+							  mmflags);
 		if (ret) {
 			if (flags & PIN_NOEVICT)
 				goto err_unpin;
@@ -3027,7 +3021,7 @@ search_free:
 						       start, end,
 						       flags);
 			if (ret == 0) {
-				search_flag = DRM_MM_SEARCH_DEFAULT;
+				mmflags = DRM_MM_INSERT_EVICT;
 				goto search_free;
 			}
 
@@ -4081,6 +4075,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * the GTT either for the user or for scanout). Those VMA still need to
 	 * unbound now.
 	 */
+	GEM_BUG_ON(i915_gem_object_is_active(obj));
 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 		GEM_BUG_ON(i915_vma_is_active(vma));
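
One subtlety in the retry above: DRM_MM_INSERT_EVICT makes
first_hole()/next_hole() walk the hole_stack list instead of the
rbtrees, and add_hole() prepends to that list, so after
i915_gem_evict_something() succeeds the freshly freed range is the
first hole examined.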
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index f7853d0f19b4..a28b3b0b3239 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -47,7 +47,10 @@ gpu_is_idle(struct drm_i915_private *dev_priv)
 }
 
 static bool
-mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
+mark_free(struct drm_mm_scan *scan,
+	  struct i915_vma *vma,
+	  unsigned int flags,
+	  struct list_head *unwind)
 {
 	if (i915_vma_is_pinned(vma))
 		return false;
@@ -56,7 +59,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
 		return false;
 
 	list_add(&vma->evict_link, unwind);
-	return drm_mm_scan_add_block(&vma->node);
+	return drm_mm_scan_add_block(scan, &vma->node);
 }
 
 /**
@@ -90,6 +93,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 			 unsigned flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct drm_mm_scan scan;
 	struct list_head eviction_list;
 	struct list_head *phases[] = {
 		&vm->inactive_list,
@@ -114,12 +118,9 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
-	if (start != 0 || end != vm->total) {
-		drm_mm_init_scan_with_range(&vm->mm, min_size,
-					    alignment, cache_level,
-					    start, end);
-	} else
-		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
+	drm_mm_init_scan_with_range(&scan, min_size, alignment, cache_level,
+				    start, end,
+				    flags & PIN_HIGH ? DRM_MM_INSERT_HIGH : 0);
 
 	if (flags & PIN_NONBLOCK)
 		phases[1] = NULL;
@@ -129,13 +130,13 @@ search_again:
 	phase = phases;
 	do {
 		list_for_each_entry(vma, *phase, vm_link)
-			if (mark_free(vma, flags, &eviction_list))
+			if (mark_free(&scan, vma, flags, &eviction_list))
 				goto found;
 	} while (*++phase);
 
 	/* Nothing found, clean up and bail out! */
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
-		ret = drm_mm_scan_remove_block(&vma->node);
+		ret = drm_mm_scan_remove_block(&scan, &vma->node);
 		BUG_ON(ret);
 	}
 
@@ -180,7 +181,7 @@ found:
 	 * of any of our objects, thus corrupting the list).
 	 */
 	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
-		if (drm_mm_scan_remove_block(&vma->node))
+		if (drm_mm_scan_remove_block(&scan, &vma->node))
 			__i915_vma_pin(vma);
 		else
 			list_del(&vma->evict_link);
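
The conversion above follows the usual scan protocol with the state now
on the caller's stack. In sketch form (struct victim and its list
members are hypothetical stand-ins for the driver's own bookkeeping):

	struct victim {
		struct drm_mm_node node;
		struct list_head lru_link;	/* driver LRU link */
		struct list_head evict_link;	/* scan unwind link */
	};

	static bool scan_for_hole(struct drm_mm_scan *scan,
				  struct list_head *lru,
				  struct list_head *evict)
	{
		struct victim *v, *vn;

		list_for_each_entry(v, lru, lru_link) {
			list_add(&v->evict_link, evict);
			if (drm_mm_scan_add_block(scan, &v->node))
				return true; /* caller unwinds and evicts */
		}

		/* No hole: every scanned block must be removed again; the
		 * evict list is in reverse order of addition, which is the
		 * order the unwind relies on.
		 */
		list_for_each_entry_safe(v, vn, evict, evict_link) {
			drm_mm_scan_remove_block(scan, &v->node);
			list_del(&v->evict_link);
		}
		return false;
	}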
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 733f8697df19..520145cb2580 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -801,8 +801,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 				(&ggtt->base.mm, &cache->node,
 				 4096, 0, -1,
 				 0, ggtt->mappable_end,
-				 DRM_MM_SEARCH_DEFAULT,
-				 DRM_MM_CREATE_DEFAULT);
+				 DRM_MM_INSERT_LOW);
 			if (ret)
 				return ERR_PTR(ret);
 		} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ff5f5287cf23..fb543941578f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1997,7 +1997,7 @@ alloc:
 	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ppgtt->node,
 						  GEN6_PD_SIZE, GEN6_PD_ALIGN,
 						  -1, 0, ggtt->base.total,
-						  DRM_MM_TOPDOWN);
+						  DRM_MM_INSERT_HIGH);
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(&ggtt->base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
@@ -2753,7 +2753,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
 						  &ggtt->gpu_error,
 						  4096, 0, -1,
 						  0, ggtt->mappable_end,
-						  0, 0);
+						  0);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index c3dcfb724966..e137824cbd59 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -63,8 +63,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 
 	mutex_lock(&dev_priv->mm.stolen_lock);
 	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
-					  alignment, start, end,
-					  DRM_MM_SEARCH_DEFAULT);
+					  alignment, start, end, 0);
 	mutex_unlock(&dev_priv->mm.stolen_lock);
 
 	return ret;
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 03defda77766..1622db24cd39 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,8 +109,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 	if (pool == AGP_TYPE) {
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 	} else {
 #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -122,8 +121,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
 #else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    mem->size, 0,
-					    DRM_MM_SEARCH_DEFAULT);
+					    mem->size);
 		offset = item->mm_node.start;
 #endif
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index aa0bd054d3e9..a3ddc95825f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -54,9 +54,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node = NULL;
-	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
-	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
+	struct drm_mm_node *node;
+	enum drm_mm_flags flags;
 	unsigned long lpfn;
 	int ret;
 
@@ -68,16 +67,14 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;
 
-	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
-		sflags = DRM_MM_SEARCH_BELOW;
-		aflags = DRM_MM_CREATE_TOP;
-	}
+	flags = 0;
+	if (place->flags & TTM_PL_FLAG_TOPDOWN)
+		flags = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
 					  mem->page_alignment, 0,
-					  place->fpfn, lpfn,
-					  sflags, aflags);
+					  place->fpfn, lpfn, flags);
 	spin_unlock(&rman->lock);
 
 	if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index a04ef1c992d9..4217d66a5cc6 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
 	if (mem->type == VIA_MEM_AGP)
 		retval = drm_mm_insert_node(&dev_priv->agp_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	else
 		retval = drm_mm_insert_node(&dev_priv->vram_mm,
 					    &item->mm_node,
-					    tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
+					    tmpSize);
 	if (retval)
 		goto fail_alloc;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index aa04fb0159a7..77cb7c627e09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -673,16 +673,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
  
 	memset(info->node, 0, sizeof(*info->node));
 	spin_lock_bh(&man->lock);
-	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
-					 0, 0,
-					 DRM_MM_SEARCH_DEFAULT,
-					 DRM_MM_CREATE_DEFAULT);
+	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	if (ret) {
 		vmw_cmdbuf_man_process(man);
-		ret = drm_mm_insert_node_generic(&man->mm, info->node,
-						 info->page_size, 0, 0,
-						 DRM_MM_SEARCH_DEFAULT,
-						 DRM_MM_CREATE_DEFAULT);
+		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 	}
 
 	spin_unlock_bh(&man->lock);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index ee092061b404..6b4025833e6f 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -45,35 +45,27 @@
 #include <linux/seq_file.h>
 #endif
 
-enum drm_mm_search_flags {
-	DRM_MM_SEARCH_DEFAULT =		0,
-	DRM_MM_SEARCH_BEST =		1 << 0,
-	DRM_MM_SEARCH_BELOW =		1 << 1,
+enum drm_mm_flags {
+	DRM_MM_INSERT_BEST = 0,
+	DRM_MM_INSERT_LOW,
+	DRM_MM_INSERT_HIGH,
+	DRM_MM_INSERT_EVICT,
 };
 
-enum drm_mm_allocator_flags {
-	DRM_MM_CREATE_DEFAULT =		0,
-	DRM_MM_CREATE_TOP =		1 << 0,
-};
-
-#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
-#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
-
 struct drm_mm_node {
+	struct drm_mm *mm;
 	struct list_head node_list;
 	struct list_head hole_stack;
 	struct rb_node rb;
-	unsigned hole_follows : 1;
-	unsigned scanned_block : 1;
-	unsigned scanned_prev_free : 1;
-	unsigned scanned_next_free : 1;
-	unsigned scanned_preceeds_hole : 1;
-	unsigned allocated : 1;
-	unsigned long color;
+	struct rb_node rb_hole_size;
+	struct rb_node rb_hole_addr;
 	u64 start;
 	u64 size;
 	u64 __subtree_last;
-	struct drm_mm *mm;
+	u64 hole_size;
+	unsigned long color;
+	unsigned allocated : 1;
+	unsigned scanned_block : 1;
 };
 
 struct drm_mm {
@@ -84,22 +76,28 @@ struct drm_mm {
 	struct drm_mm_node head_node;
 	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
 	struct rb_root interval_tree;
-
-	unsigned int scan_check_range : 1;
-	unsigned int scanned_blocks;
-	unsigned long scan_color;
-	u64 scan_alignment;
-	u64 scan_size;
-	u64 scan_hit_start;
-	u64 scan_hit_end;
-	u64 scan_start;
-	u64 scan_end;
-	struct drm_mm_node *prev_scanned_node;
+	struct rb_root holes_size;
+	struct rb_root holes_addr;
 
 	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
 			     u64 *start, u64 *end);
 };
 
+struct drm_mm_scan {
+	struct drm_mm_node *prev_node;
+
+	u64 size;
+	u64 alignment;
+	u64 alignment_mask;
+	u64 start;
+	u64 end;
+	u64 hit_start;
+	u64 hit_end;
+
+	unsigned long color;
+	unsigned int flags;
+};
+
 /**
  * drm_mm_node_allocated - checks whether a node is allocated
  * @node: drm_mm_node to check
@@ -148,7 +146,7 @@ static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
  */
 static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
-	BUG_ON(!hole_node->hole_follows);
+	BUG_ON(!hole_node->hole_size);
 	return __drm_mm_hole_node_start(hole_node);
 }
 
@@ -219,14 +217,39 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * Basic range manager support (drm_mm.c)
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+					struct drm_mm_node *node,
+					u64 size,
+					u64 alignment,
+					unsigned long color,
+					u64 start,
+					u64 end,
+					unsigned flags);
+
+/**
+ * drm_mm_insert_node_generic - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @flags: flags to fine-tune the allocation search and creation
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
+static inline int
+drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+			   u64 size, u64 alignment,
+			   unsigned long color,
+			   unsigned flags)
+{
+	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+						   color, 0, ~0ull, flags);
+}
 
-int drm_mm_insert_node_generic(struct drm_mm *mm,
-			       struct drm_mm_node *node,
-			       u64 size,
-			       u64 alignment,
-			       unsigned long color,
-			       enum drm_mm_search_flags sflags,
-			       enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node - search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -245,23 +268,11 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
  */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
 				     struct drm_mm_node *node,
-				     u64 size,
-				     u64 alignment,
-				     enum drm_mm_search_flags flags)
+				     u64 size)
 {
-	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
-					  DRM_MM_CREATE_DEFAULT);
+	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
 }
 
-int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
-					struct drm_mm_node *node,
-					u64 size,
-					u64 alignment,
-					unsigned long color,
-					u64 start,
-					u64 end,
-					enum drm_mm_search_flags sflags,
-					enum drm_mm_allocator_flags aflags);
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -286,11 +297,10 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 					      u64 alignment,
 					      u64 start,
 					      u64 end,
-					      enum drm_mm_search_flags flags)
+					      unsigned flags)
 {
 	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
-						   0, start, end, flags,
-						   DRM_MM_CREATE_DEFAULT);
+						   0, start, end, flags);
 }
 
 void drm_mm_remove_node(struct drm_mm_node *node);
@@ -299,7 +309,19 @@ void drm_mm_init(struct drm_mm *mm,
 		 u64 start,
 		 u64 size);
 void drm_mm_takedown(struct drm_mm *mm);
-bool drm_mm_clean(struct drm_mm *mm);
+
+/**
+ * drm_mm_clean - checks whether an allocator is clean
+ * @mm: drm_mm allocator to check
+ *
+ * Returns:
+ * True if the allocator is completely free, false if there's still a node
+ * allocated in it.
+ */
+static inline bool drm_mm_clean(struct drm_mm *mm)
+{
+	return list_empty(&mm->head_node.node_list);
+}
 
 struct drm_mm_node *
 drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
@@ -307,18 +329,39 @@ drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
 struct drm_mm_node *
 drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
 
-void drm_mm_init_scan(struct drm_mm *mm,
-		      u64 size,
-		      u64 alignment,
-		      unsigned long color);
-void drm_mm_init_scan_with_range(struct drm_mm *mm,
-				 u64 size,
-				 u64 alignment,
-				 unsigned long color,
-				 u64 start,
-				 u64 end);
-bool drm_mm_scan_add_block(struct drm_mm_node *node);
-bool drm_mm_scan_remove_block(struct drm_mm_node *node);
+void drm_mm_init_scan_with_range(struct drm_mm_scan *scan,
+				 u64 size, u64 alignment, unsigned long color,
+				 u64 start, u64 end,
+				 unsigned flags);
+
+/**
+ * drm_mm_init_scan - initialize lru scanning
+ * @scan: drm_mm_scan to initialize
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @flags: flags to specify how the allocation will be performed afterwards
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+static inline void drm_mm_init_scan(struct drm_mm_scan *scan,
+				    u64 size,
+				    u64 alignment,
+				    unsigned long color,
+				    unsigned flags)
+{
+	drm_mm_init_scan_with_range(scan, size, alignment, color,
+				    0, ~0ull, flags);
+}
+bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
+			   struct drm_mm_node *node);
+bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
+			      struct drm_mm_node *node);
 
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 #ifdef CONFIG_DEBUG_FS
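
A final note on the cached alignment_mask (used in both the insert loop
and struct drm_mm_scan): power-of-two alignments are handled with a
mask rather than a 64-bit division. An equivalent standalone round-up
helper, purely illustrative (the top-down path instead rounds down):

	static u64 align_start(u64 start, u64 alignment, u64 mask)
	{
		u64 rem;

		if (mask)	/* mask = alignment - 1 when a power of two */
			rem = start & mask;
		else		/* generic alignment needs div64 */
			div64_u64_rem(start, alignment, &rem);

		return rem ? start + alignment - rem : start;
	}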
-- 
2.8.1


