[PATCH 58/59] drm/i915: Eliminate struct_mutex for the shrinker

Chris Wilson <chris@chris-wilson.co.uk>
Wed Jun 27 20:36:25 UTC 2018


We have replaced struct_mutex with per-vm and per-object mutexes (the
former may not be held while allocating, and the latter is only held
while the object is unshrinkable), but we still required struct_mutex
to control access to zombie objects (refcount == 0, waiting for the
RCU worker to free them). We must forgo the immediate reclaim of those
zombie pages and instead hold a reference to the object while we
discard its pages.
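
The pattern that makes this safe is to take a reference only while the
object is demonstrably alive, so the shrinker never touches a zombie
and no longer needs struct_mutex to serialise against the RCU free
worker. Below is a minimal sketch of such a zombie-safe shrinker walk
in the style of the loop this patch modifies; struct obj,
reclaim_pages() and release_obj() are hypothetical stand-ins, while
kref_get_unless_zero() and the list/spinlock helpers are the real
kernel API:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct obj {				/* hypothetical stand-in object */
	struct kref refcount;
	struct list_head link;
};

static unsigned long reclaim_pages(struct obj *o);	/* hypothetical */
static void release_obj(struct kref *kref);		/* hypothetical */

static unsigned long shrink_walk(spinlock_t *lock, struct list_head *list)
{
	LIST_HEAD(still_in_list);
	unsigned long count = 0;
	struct obj *o;

	spin_lock(lock);
	while ((o = list_first_entry_or_null(list, typeof(*o), link))) {
		list_move_tail(&o->link, &still_in_list);

		/*
		 * refcount == 0 marks a zombie that already belongs to
		 * the RCU free worker; leave its pages for that worker
		 * instead of reclaiming them here.
		 */
		if (!kref_get_unless_zero(&o->refcount))
			continue;

		/* Our reference keeps o alive, so the lock can drop. */
		spin_unlock(lock);
		count += reclaim_pages(o);
		kref_put(&o->refcount, release_obj);
		spin_lock(lock);
	}
	list_splice_tail(&still_in_list, list);
	spin_unlock(lock);

	return count;
}

The cost named above falls out naturally: a zombie's pages are now
freed by the RCU worker a grace period later, rather than reclaimed
synchronously by the shrinker.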

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 97 ++----------------------
 1 file changed, 7 insertions(+), 90 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 15e8e563f8a8..14b49dacac88 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -36,42 +36,6 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
-static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
-{
-	switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
-	case MUTEX_TRYLOCK_RECURSIVE:
-		*unlock = false;
-		return true;
-
-	case MUTEX_TRYLOCK_FAILED:
-		*unlock = false;
-		preempt_disable();
-		do {
-			cpu_relax();
-			if (mutex_trylock(&i915->drm.struct_mutex)) {
-				*unlock = true;
-				break;
-			}
-		} while (!need_resched());
-		preempt_enable();
-		return *unlock;
-
-	case MUTEX_TRYLOCK_SUCCESS:
-		*unlock = true;
-		return true;
-	}
-
-	BUG();
-}
-
-static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
-{
-	if (!unlock)
-		return;
-
-	mutex_unlock(&i915->drm.struct_mutex);
-}
-
 static bool swap_available(void)
 {
 	return get_nr_swap_pages() > 0;
@@ -159,10 +123,8 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	}, *phase;
 	unsigned long count = 0;
 	unsigned long scanned = 0;
-	bool unlock;
 
-	if (!shrinker_lock(i915, &unlock))
-		return 0;
+	trace_i915_gem_shrink(i915, target, flags);
 
 	/*
 	 * When shrinking the active list, also consider active contexts.
@@ -174,10 +136,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	 * we will free as much as we can and hope to get a second chance.
 	 */
 	if (flags & I915_SHRINK_ACTIVE)
-		i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
-
-	trace_i915_gem_shrink(i915, target, flags);
-	i915_retire_requests(i915);
+		i915_gem_wait_for_idle(i915, I915_WAIT_FOR_IDLE_BOOST);
 
 	/*
 	 * Unbinding of objects will require HW access; Let us not wake the
@@ -245,6 +204,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
 			if (!can_release_pages(obj))
 				continue;
 
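+			/*
+			 * refcount == 0 marks a zombie awaiting the RCU free
+			 * worker; skip it rather than reclaim its pages now.
+			 */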
+			if (!kref_get_unless_zero(&obj->base.refcount))
+				continue;
+
 			spin_unlock(&i915->mm.obj_lock);
 
 			if (unsafe_drop_pages(obj)) {
@@ -259,6 +221,8 @@ i915_gem_shrink(struct drm_i915_private *i915,
 			}
 			scanned += obj->base.size >> PAGE_SHIFT;
 
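+			/* Drop the reference taken above now that we are done. */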
+			i915_gem_object_put(obj);
+
 			spin_lock(&i915->mm.obj_lock);
 		}
 		list_splice_tail(&still_in_list, phase->list);
@@ -268,10 +232,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
 	if (flags & I915_SHRINK_BOUND)
 		intel_runtime_pm_put(i915);
 
-	i915_retire_requests(i915);
-
-	shrinker_unlock(i915, unlock);
-
 	if (nr_scanned)
 		*nr_scanned += scanned;
 	return count;
@@ -340,13 +300,9 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	unsigned long freed;
-	bool unlock;
 
 	sc->nr_scanned = 0;
 
-	if (!shrinker_lock(i915, &unlock))
-		return SHRINK_STOP;
-
 	freed = i915_gem_shrink(i915,
 				sc->nr_to_scan,
 				&sc->nr_scanned,
@@ -363,35 +319,9 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		intel_runtime_pm_put(i915);
 	}
 
-	shrinker_unlock(i915, unlock);
-
 	return sc->nr_scanned ? freed : SHRINK_STOP;
 }
 
-static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
-			      int timeout_ms)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
-
-	do {
-		if (i915_gem_wait_for_idle(i915, 0) == 0 &&
-		    shrinker_lock(i915, unlock))
-			break;
-
-		schedule_timeout_killable(1);
-		if (fatal_signal_pending(current))
-			return false;
-
-		if (time_after(jiffies, timeout)) {
-			pr_err("Unable to lock GPU to purge memory.\n");
-			return false;
-		}
-	} while (1);
-
-	return true;
-}
-
 static int
 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 {
@@ -442,16 +372,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
 	struct i915_vma *vma, *next;
 	unsigned long freed_pages = 0;
-	bool unlock;
-	int ret;
-
-	if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
-		return NOTIFY_DONE;
-
-	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
-	if (ret)
-		goto out;
 
 	intel_runtime_pm_get(i915);
 	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@@ -475,9 +395,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 	}
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
-out:
-	shrinker_unlock(i915, unlock);
-
 	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
 }
-- 
2.18.0


