[PATCH] drm/mm: Only allow sleeping if the caller permits

Chris Wilson <chris@chris-wilson.co.uk>
Mon Mar 23 18:36:06 UTC 2020


Sometimes the drm_mm is searched from within an atomic context (yikes!)
so we must be cautious and not insert a schedule() unless the caller
indicates it is safe to do so. Gate the cond_resched() and the
fatal-signal check in next_hole() behind a new
DRM_MM_INSERT_INTERRUPTIBLE mode flag, and set that flag in the i915
paths that are allowed to sleep. While at it, fold the
DRM_MM_INSERT_ONCE handling into next_hole() so that the special bits
are masked off in a single place.
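
With this patch, a caller that knows it may sleep opts in by or'ing
DRM_MM_INSERT_INTERRUPTIBLE into its insertion mode; atomic callers
leave the bit clear and the walk never reschedules. A sketch of the
intended usage, mirroring the i915 conversions below (the vm, node and
range arguments are illustrative):

	/* May sleep: allow cond_resched() and fatal-signal checks. */
	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end,
					  DRM_MM_INSERT_BEST |
					  DRM_MM_INSERT_INTERRUPTIBLE);

	/* Atomic context: omit the flag and the search stays non-blocking. */
	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, DRM_MM_INSERT_BEST);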

Closes: https://gitlab.freedesktop.org/drm/intel/issues/1509
Fixes: 7be1b9b8e9d1 ("drm/mm: Break long searches in fragmented address spaces")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/drm_mm.c                      | 21 ++++++++++---------
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  3 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c    |  4 +++-
 drivers/gpu/drm/i915/i915_gem.c               |  3 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 10 ++++++---
 include/drm/drm_mm.h                          | 11 ++++++++++
 6 files changed, 36 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index bc6e208949e8..794d9d4c2d36 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -344,7 +344,7 @@ first_hole(struct drm_mm *mm,
 	   u64 start, u64 end, u64 size,
 	   enum drm_mm_insert_mode mode)
 {
-	switch (mode) {
+	switch (mode & DRM_MM_INSERT_MODE) {
 	default:
 	case DRM_MM_INSERT_BEST:
 		return best_hole(mm, size);
@@ -367,12 +367,17 @@ next_hole(struct drm_mm *mm,
 	  struct drm_mm_node *node,
 	  enum drm_mm_insert_mode mode)
 {
+	if (mode & DRM_MM_INSERT_ONCE)
+		return NULL; /* check only the first hit */
+
 	/* Searching is slow; check if we ran out of time/patience */
-	cond_resched();
-	if (fatal_signal_pending(current))
-		return NULL;
+	if (mode & DRM_MM_INSERT_INTERRUPTIBLE) {
+		cond_resched();
+		if (fatal_signal_pending(current))
+			return NULL;
+	}
 
-	switch (mode) {
+	switch (mode & DRM_MM_INSERT_MODE) {
 	default:
 	case DRM_MM_INSERT_BEST:
 		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
@@ -476,7 +481,6 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 {
 	struct drm_mm_node *hole;
 	u64 remainder_mask;
-	bool once;
 
 	DRM_MM_BUG_ON(range_start > range_end);
 
@@ -489,13 +493,10 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 	if (alignment <= 1)
 		alignment = 0;
 
-	once = mode & DRM_MM_INSERT_ONCE;
-	mode &= ~DRM_MM_INSERT_ONCE;
-
 	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
 	for (hole = first_hole(mm, range_start, range_end, size, mode);
 	     hole;
-	     hole = once ? NULL : next_hole(mm, hole, mode)) {
+	     hole = next_hole(mm, hole, mode)) {
 		u64 hole_start = __drm_mm_hole_node_start(hole);
 		u64 hole_end = hole_start + hole->hole_size;
 		u64 adj_start, adj_end;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6b3013d20851..1e06f5e36bbd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1057,7 +1057,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 				(&ggtt->vm.mm, &cache->node,
 				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
 				 0, ggtt->mappable_end,
-				 DRM_MM_INSERT_LOW);
+				 DRM_MM_INSERT_LOW |
+				 DRM_MM_INSERT_INTERRUPTIBLE);
 			mutex_unlock(&ggtt->vm.mutex);
 			if (err) /* no inactive aperture space, use cpu reloc */
 				return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 5557dfa83a7b..04403f8a7d3a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -43,7 +43,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
 	mutex_lock(&i915->mm.stolen_lock);
 	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
 					  size, alignment, 0,
-					  start, end, DRM_MM_INSERT_BEST);
+					  start, end,
+					  DRM_MM_INSERT_BEST |
+					  DRM_MM_INSERT_INTERRUPTIBLE);
 	mutex_unlock(&i915->mm.stolen_lock);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 762b50b08d73..9099c311f984 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -69,7 +69,8 @@ insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
 					  size, 0, I915_COLOR_UNEVICTABLE,
 					  0, ggtt->mappable_end,
-					  DRM_MM_INSERT_LOW);
+					  DRM_MM_INSERT_LOW |
+					  DRM_MM_INSERT_INTERRUPTIBLE);
 
 	mutex_unlock(&ggtt->vm.mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cb43381b0d37..899b2af13840 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -232,7 +232,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 
 	err = drm_mm_insert_node_in_range(&vm->mm, node,
 					  size, alignment, color,
-					  start, end, mode);
+					  start, end,
+					  mode | DRM_MM_INSERT_INTERRUPTIBLE);
 	if (err != -ENOSPC)
 		return err;
 
@@ -240,7 +241,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 		err = drm_mm_insert_node_in_range(&vm->mm, node,
 						  size, alignment, color,
 						  start, end,
-						  DRM_MM_INSERT_BEST);
+						  DRM_MM_INSERT_BEST |
+						  DRM_MM_INSERT_INTERRUPTIBLE);
 		if (err != -ENOSPC)
 			return err;
 	}
@@ -288,7 +290,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 
 	return drm_mm_insert_node_in_range(&vm->mm, node,
 					   size, alignment, color,
-					   start, end, DRM_MM_INSERT_EVICT);
+					   start, end,
+					   DRM_MM_INSERT_EVICT |
+					   DRM_MM_INSERT_INTERRUPTIBLE);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index ee8b0e80ca90..cc261e33143e 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -141,7 +141,18 @@ enum drm_mm_insert_mode {
 	 * Does not search all holes.
 	 */
 	DRM_MM_INSERT_LOWEST  = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
+
+	/**
+	 * @DRM_MM_INSERT_INTERRUPTIBLE:
+	 *
+	 * Check for pending signals and allow rescheduling amidst the
+	 * search. In some heavily fragmented cases, searching for an available
+	 * node of just the right size can take a long time, in which case it
+	 * is better to let something else run during our fruitless search.
+	 */
+	DRM_MM_INSERT_INTERRUPTIBLE = BIT(30),
 };
+#define DRM_MM_INSERT_MODE GENMASK(29, 0) /* all but the special bits */
 
 /**
  * struct drm_mm_node - allocated block in the DRM allocator
-- 
2.20.1


