[PATCH 39/40] drm/i915: Report all objects with allocated pages to the shrinker

Chris Wilson chris at chris-wilson.co.uk
Tue Jun 26 20:52:08 UTC 2018


Currently, we try to report to the shrinker the precise number of
objects (pages) that are available to be reaped at this moment. This
requires searching all objects with allocated pages to see if they
fulfill the search criteria, and this count is performed quite
frequently. (The shrinker tries to free ~128 pages on each invocation,
before which we count all the objects; counting takes longer than
unbinding the objects!) If we take the pragmatic view that, with
sufficient desire, all objects are eventually reapable (they become
inactive, or are no longer used as a framebuffer, etc.), we can simply
return the count of allocated pages maintained during get_pages/put_pages
rather than walk the lists every time.
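
Concretely, the bookkeeping moves into the page acquisition/release
paths, and the shrinker's count callback collapses into two plain
reads. A condensed sketch of the pattern (field names as introduced by
this patch; the surrounding list manipulation is elided):

	/* On acquiring pages, in __i915_gem_object_set_pages(): */
	if (i915_gem_object_is_shrinkable(obj)) {
		spin_lock(&i915->mm.obj_lock);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
		spin_unlock(&i915->mm.obj_lock);
	}

	/* In i915_gem_shrinker_count(): an unlocked read suffices,
	 * as the result is only an advisory estimate for vmscan.
	 */
	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;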

The downside is that we may (slightly) over-report the number of
objects/pages we could shrink and so penalize ourselves by shrinking
more than required. This is mitigated by preserving the order in which
we shrink objects, so that active and frequently used objects are
penalized last; and if memory is so tight that we do need to free them,
we would have had to shrink them anyway.
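
For reference, the shrink pass itself already walks the per-state
lists in preference order, which is what preserves that behaviour; a
rough sketch (list names as used in this series; the exact phase table
is an assumption, not part of this diff):

	struct list_head *phases[] = {
		&i915->mm.purge_list,	/* userspace marked DONTNEED */
		&i915->mm.unbound_list,	/* allocated but not in the GTT */
		&i915->mm.bound_list,	/* in use; reaped only under pressure */
		NULL,
	}, **phase;

	for (phase = phases; *phase; phase++)
		/* ... scan *phase, unbinding and releasing pages ... */;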

v2: Only expose shrinkable objects to the shrinker; a small reduction
in the reported count from no longer considering stolen and foreign
objects.
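
(For reference, the i915_gem_object_is_shrinkable() check used
throughout the diff is, in this era of the driver, simply a flag test
on the object ops; a sketch, not part of this diff:

	static inline bool
	i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
	{
		return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
	}

Objects whose ops do not set the flag, such as stolen objects, are
never placed on the shrinker's lists.)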

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
Cc: Matthew Auld <matthew.auld at intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c      | 10 ++--
 drivers/gpu/drm/i915/i915_drv.h          |  7 +--
 drivers/gpu/drm/i915/i915_gem.c          | 73 +++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 28 ++-------
 drivers/gpu/drm/i915/i915_gem_stolen.c   |  3 +-
 drivers/gpu/drm/i915/i915_vma.c          | 23 +++++---
 6 files changed, 64 insertions(+), 80 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f1113fab12cf..ded51c9c4dd2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -245,7 +245,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 	unsigned long total, count, n;
 	int ret;
 
-	total = READ_ONCE(dev_priv->mm.object_count);
+	total = READ_ONCE(dev_priv->mm.shrink_count);
 	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
 	if (!objects)
 		return -ENOMEM;
@@ -436,9 +436,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	seq_printf(m, "%u objects, %llu bytes\n",
-		   dev_priv->mm.object_count,
-		   dev_priv->mm.object_memory);
+	seq_printf(m, "%u shrinkable objects, %llu bytes\n",
+		   dev_priv->mm.shrink_count,
+		   dev_priv->mm.shrink_memory);
 
 	size = count = 0;
 	mapped_size = mapped_count = 0;
@@ -568,7 +568,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 	unsigned long nobject, n;
 	int count, ret;
 
-	nobject = READ_ONCE(dev_priv->mm.object_count);
+	nobject = READ_ONCE(dev_priv->mm.shrink_count);
 	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
 	if (!objects)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 58f4ba7c0c2c..cb2569d63204 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -991,10 +991,9 @@ struct i915_gem_mm {
 	/** Bit 6 swizzling required for Y tiling */
 	uint32_t bit_6_swizzle_y;
 
-	/* accounting, useful for userland debugging */
-	spinlock_t object_stat_lock;
-	u64 object_memory;
-	u32 object_count;
+	/* shrinker accounting, also useful for userland debugging */
+	u64 shrink_memory;
+	u32 shrink_count;
 };
 
 #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ae54b75e930e..267b01165d40 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -80,25 +80,6 @@ remove_mappable_node(struct drm_mm_node *node)
 	drm_mm_remove_node(node);
 }
 
-/* some bookkeeping */
-static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
-				  u64 size)
-{
-	spin_lock(&dev_priv->mm.object_stat_lock);
-	dev_priv->mm.object_count++;
-	dev_priv->mm.object_memory += size;
-	spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
-static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
-				     u64 size)
-{
-	spin_lock(&dev_priv->mm.object_stat_lock);
-	dev_priv->mm.object_count--;
-	dev_priv->mm.object_memory -= size;
-	spin_unlock(&dev_priv->mm.object_stat_lock);
-}
-
 static int
 i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
@@ -1696,7 +1677,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	}
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
-	if (obj->mm.madv == I915_MADV_WILLNEED) {
+	if (i915_gem_object_is_shrinkable(obj) &&
+	    obj->mm.madv == I915_MADV_WILLNEED) {
 		struct list_head *list;
 
 		spin_lock(&i915->mm.obj_lock);
@@ -2432,9 +2414,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	if (!pages)
 		return NULL;
 
-	spin_lock(&i915->mm.obj_lock);
-	list_del(&obj->mm.link);
-	spin_unlock(&i915->mm.obj_lock);
+	if (i915_gem_object_is_shrinkable(obj)) {
+		spin_lock(&i915->mm.obj_lock);
+		list_del(&obj->mm.link);
+		i915->mm.shrink_count--;
+		i915->mm.shrink_memory -= obj->base.size;
+		spin_unlock(&i915->mm.obj_lock);
+	}
 
 	if (obj->mm.mapping) {
 		void *ptr;
@@ -2719,9 +2705,13 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
 
-	spin_lock(&i915->mm.obj_lock);
-	list_add(&obj->mm.link, &i915->mm.unbound_list);
-	spin_unlock(&i915->mm.obj_lock);
+	if (i915_gem_object_is_shrinkable(obj)) {
+		spin_lock(&i915->mm.obj_lock);
+		i915->mm.shrink_count++;
+		i915->mm.shrink_memory += obj->base.size;
+		list_add(&obj->mm.link, &i915->mm.unbound_list);
+		spin_unlock(&i915->mm.obj_lock);
+	}
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -4249,15 +4239,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (i915_gem_object_has_pages(obj)) {
 		struct list_head *list;
 
-		spin_lock(&i915->mm.obj_lock);
-		if (obj->mm.madv != I915_MADV_WILLNEED)
-			list = &i915->mm.purge_list;
-		else if (obj->bind_count)
-			list = &i915->mm.bound_list;
-		else
-			list = &i915->mm.unbound_list;
-		list_move_tail(&obj->mm.link, list);
-		spin_unlock(&i915->mm.obj_lock);
+		if (i915_gem_object_is_shrinkable(obj)) {
+			spin_lock(&i915->mm.obj_lock);
+			if (obj->mm.madv != I915_MADV_WILLNEED)
+				list = &i915->mm.purge_list;
+			else if (obj->bind_count)
+				list = &i915->mm.bound_list;
+			else
+				list = &i915->mm.unbound_list;
+			list_move_tail(&obj->mm.link, list);
+			spin_unlock(&i915->mm.obj_lock);
+		}
 	}
 
 	/* if the object is no longer attached, discard its backing storage */
@@ -4304,8 +4296,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_page.lock);
-
-	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4421,6 +4411,8 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 	 * immediately released. In this case, we can then skip copying
 	 * back the contents from the GPU.
 	 */
+	if (!i915_gem_object_is_shrinkable(obj))
+		return false;
 
 	if (obj->mm.madv != I915_MADV_WILLNEED)
 		return false;
@@ -4467,7 +4459,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		 * or else we may oom whilst there are plenty of deferred
 		 * freed objects.
 		 */
-		if (i915_gem_object_has_pages(obj)) {
+		if (i915_gem_object_has_pages(obj) &&
+		    i915_gem_object_is_shrinkable(obj)) {
 			spin_lock(&i915->mm.obj_lock);
 			list_del_init(&obj->mm.link);
 			spin_unlock(&i915->mm.obj_lock);
@@ -4493,7 +4486,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		reservation_object_fini(&obj->__builtin_resv);
 		drm_gem_object_release(&obj->base);
-		i915_gem_info_remove_obj(i915, obj->base.size);
 
 		kfree(obj->bit_17);
 		i915_gem_object_free(obj);
@@ -5254,7 +5246,6 @@ i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
 
 static void i915_gem_init__mm(struct drm_i915_private *i915)
 {
-	spin_lock_init(&i915->mm.object_stat_lock);
 	spin_lock_init(&i915->mm.obj_lock);
 	spin_lock_init(&i915->mm.free_lock);
 
@@ -5343,7 +5334,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
 	i915_gem_drain_freed_objects(dev_priv);
 	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
 	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
-	WARN_ON(dev_priv->mm.object_count);
+	WARN_ON(dev_priv->mm.shrink_count);
 	WARN_ON(!list_empty(&dev_priv->gt.timelines));
 
 	kmem_cache_destroy(dev_priv->priorities);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 9257f69639a8..15e8e563f8a8 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -310,30 +310,14 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_i915_gem_object *obj;
-	unsigned long num_objects = 0;
-	unsigned long count = 0;
+	unsigned long num_objects;
+	unsigned long count;
 
-	spin_lock(&i915->mm.obj_lock);
-	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
-		if (can_release_pages(obj)) {
-			count += obj->base.size >> PAGE_SHIFT;
-			num_objects++;
-		}
+	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
+	num_objects = READ_ONCE(i915->mm.shrink_count);
 
-	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
-			count += obj->base.size >> PAGE_SHIFT;
-			num_objects++;
-		}
-	list_for_each_entry(obj, &i915->mm.purge_list, mm.link)
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
-			count += obj->base.size >> PAGE_SHIFT;
-			num_objects++;
-		}
-	spin_unlock(&i915->mm.obj_lock);
-
-	/* Update our preferred vmscan batch size for the next pass.
+	/*
+	 * Update our preferred vmscan batch size for the next pass.
 	 * Our rough guess for an effective batch size is roughly 2
 	 * available GEM objects worth of pages. That is we don't want
 	 * the shrinker to fire, until it is worth the cost of freeing an
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index b40567597225..5f464e62ea15 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -673,7 +673,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	mutex_unlock(&ggtt->vm.mutex);
 
 	spin_lock(&dev_priv->mm.obj_lock);
-	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+	if (i915_gem_object_is_shrinkable(obj))
+		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
 	spin_unlock(&dev_priv->mm.obj_lock);
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index ebd6633bd88a..5413b5320289 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -59,10 +59,12 @@ __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
-	spin_lock(&rq->i915->mm.obj_lock);
-	if (obj->bind_count)
-		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
-	spin_unlock(&rq->i915->mm.obj_lock);
+	if (i915_gem_object_is_shrinkable(obj)) {
+		spin_lock(&rq->i915->mm.obj_lock);
+		if (obj->bind_count)
+			list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
+		spin_unlock(&rq->i915->mm.obj_lock);
+	}
 
 	obj->mm.dirty = true; /* be paranoid  */
 
@@ -669,11 +671,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		spin_lock(&dev_priv->mm.obj_lock);
-		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-		obj->bind_count++;
-		spin_unlock(&dev_priv->mm.obj_lock);
 
+		if (i915_gem_object_is_shrinkable(obj))
+			list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+
+		obj->bind_count++;
 		assert_bind_count(obj);
+
+		spin_unlock(&dev_priv->mm.obj_lock);
 	}
 
 	return 0;
@@ -709,9 +714,13 @@ i915_vma_remove(struct i915_vma *vma)
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		spin_lock(&i915->mm.obj_lock);
+
+		GEM_BUG_ON(obj->bind_count == 0);
 		if (--obj->bind_count == 0 &&
+		    i915_gem_object_is_shrinkable(obj) &&
 		    obj->mm.madv == I915_MADV_WILLNEED)
 			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+
 		spin_unlock(&i915->mm.obj_lock);
 
 		/*
-- 
2.18.0


