[PATCH v2 2/3] drm/msm/shrinker: We can vmap shrink active_list too

Rob Clark robdclark at gmail.com
Mon Nov 16 17:48:50 UTC 2020


From: Rob Clark <robdclark at chromium.org>

Just because an obj is active, if its vmap_count is zero, we can still
tear down the vmap.
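
For context, the is_vunmapable() check used below only looks at the
object's vmap state, not at whether the object is currently active, which
is what makes it safe to run over the active_list as well.  Roughly (a
sketch from memory, not a verbatim copy of the helper in msm_gem.h):

static bool is_vunmapable(struct msm_gem_object *msm_obj)
{
	/* mapped into the kernel (vaddr set), but no outstanding vmap users */
	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}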

Signed-off-by: Rob Clark <robdclark at chromium.org>
---
 drivers/gpu/drm/msm/msm_gem_shrinker.c | 47 +++++++++++++++++++-------
 1 file changed, 35 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 6f4b1355725f..9d51c1eb808d 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -6,6 +6,7 @@
 
 #include "msm_drv.h"
 #include "msm_gem.h"
+#include "msm_gpu.h"
 #include "msm_gpu_trace.h"
 
 static unsigned long
@@ -61,17 +62,19 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	return freed;
 }
 
-static int
-msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+/* since we don't know any better, let's bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*
+ */
+static const int vmap_shrink_limit = 15;
+
+static unsigned
+vmap_shrink(struct list_head *mm_list)
 {
-	struct msm_drm_private *priv =
-		container_of(nb, struct msm_drm_private, vmap_notifier);
 	struct msm_gem_object *msm_obj;
 	unsigned unmapped = 0;
 
-	mutex_lock(&priv->mm_lock);
-
-	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+	list_for_each_entry(msm_obj, mm_list, mm_list) {
 		if (!msm_gem_trylock(&msm_obj->base))
 			continue;
 		if (is_vunmapable(msm_obj)) {
@@ -80,11 +83,31 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		}
 		msm_gem_unlock(&msm_obj->base);
 
-		/* since we don't know any better, lets bail after a few
-		 * and if necessary the shrinker will be invoked again.
-		 * Seems better than unmapping *everything*
-		 */
-		if (++unmapped >= 15)
+		if (++unmapped >= vmap_shrink_limit)
+			break;
+	}
+
+	return unmapped;
+}
+
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+	struct msm_drm_private *priv =
+		container_of(nb, struct msm_drm_private, vmap_notifier);
+	struct list_head *mm_lists[] = {
+		&priv->inactive_list,
+		priv->gpu ? &priv->gpu->active_list : NULL,
+		NULL,
+	};
+	unsigned idx, unmapped = 0;
+
+	mutex_lock(&priv->mm_lock);
+
+	for (idx = 0; mm_lists[idx]; idx++) {
+		unmapped += vmap_shrink(mm_lists[idx]);
+
+		if (unmapped >= vmap_shrink_limit)
 			break;
 	}
 
-- 
2.28.0
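
Note on the mm_lists[] walk above: the array is NULL-terminated, and the
active_list slot is only populated when a GPU is actually present.  A
condensed sketch of the resulting control flow (same code as in the patch,
with the no-GPU case spelled out in comments):

	struct list_head *mm_lists[] = {
		&priv->inactive_list,
		priv->gpu ? &priv->gpu->active_list : NULL,
		NULL,
	};

	/*
	 * The loop stops at the first NULL entry, so when priv->gpu is
	 * NULL only the inactive_list is walked (the pre-patch behaviour),
	 * and we still bail once vmap_shrink_limit objects have been
	 * unmapped.
	 */
	for (idx = 0; mm_lists[idx]; idx++) {
		unmapped += vmap_shrink(mm_lists[idx]);

		if (unmapped >= vmap_shrink_limit)
			break;
	}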


