[PATCH 21/23] mm,drm/i915: Mark pinned shmemfs pages as unevictable

Chris Wilson <chris@chris-wilson.co.uk>
Sun Sep 3 11:54:26 UTC 2017


Similar in principle to the treatment of get_user_pages, pages that
i915.ko acquires from shmemfs are not immediately reclaimable and so
should be excluded from mm accounting and vmscan until they have been
returned to the system via shrink_slab/i915_gem_shrink. Moving the
unreclaimable pages off the inactive anon lru not only improves vmscan
by sparing it the walk over unreclaimable pages, but also gives the
system a more accurate picture of how much memory it can reclaim at
any given moment.

Note, however, the interaction with shrink_slab, which will move some
mlocked pages back to the inactive anon lru.
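
In outline, every shmemfs page the driver acquires is bracketed by
mlock_vma_page() and every page it releases by munlock_vma_page(),
with the page lock held around each call since both helpers assert
PageLocked. A minimal sketch of that pairing (the wrapper names are
illustrative only and are not part of the patch):

  #include <linux/mm.h>
  #include <linux/pagemap.h>      /* lock_page()/unlock_page() */

  /* Exported from mm/mlock.c by this patch; normally these are
   * declared in mm/internal.h and unavailable to modules.
   */
  extern void mlock_vma_page(struct page *page);
  extern unsigned int munlock_vma_page(struct page *page);

  /* Illustrative: mark a freshly pinned shmemfs page unevictable
   * so vmscan stops walking it.
   */
  static void pin_shmem_page(struct page *page)
  {
          lock_page(page);        /* mlock_vma_page() asserts PageLocked */
          mlock_vma_page(page);   /* moves the page to the unevictable lru */
          unlock_page(page);
  }

  /* Illustrative: make the page evictable again before the final
   * put_page() hands it back to the system.
   */
  static void unpin_shmem_page(struct page *page)
  {
          lock_page(page);
          munlock_vma_page(page); /* back onto an evictable lru */
          unlock_page(page);
  }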

Suggested-by: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 18 +++++++++++++++++-
 mm/mlock.c                      |  2 ++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90835db4842d..8b1b3906bb31 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2186,6 +2186,9 @@ void __i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 	obj->mm.pages = ERR_PTR(-EFAULT);
 }
 
+extern void mlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
+
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 			      struct sg_table *pages)
@@ -2207,6 +2210,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 		if (obj->mm.madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
 
+		lock_page(page);
+		munlock_vma_page(page);
+		unlock_page(page);
+
 		put_page(page);
 	}
 	obj->mm.dirty = false;
@@ -2411,6 +2418,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		}
 		last_pfn = page_to_pfn(page);
 
+		lock_page(page);
+		mlock_vma_page(page);
+		unlock_page(page);
+
 		/* Check that the i965g/gm workaround works. */
 		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
 	}
@@ -2449,8 +2460,13 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 err_sg:
 	sg_mark_end(sg);
 err_pages:
-	for_each_sgt_page(page, sgt_iter, st)
+	for_each_sgt_page(page, sgt_iter, st) {
+		lock_page(page);
+		munlock_vma_page(page);
+		unlock_page(page);
+
 		put_page(page);
+	}
 	sg_free_table(st);
 	kfree(st);
 
diff --git a/mm/mlock.c b/mm/mlock.c
index b562b5523a65..531d9f8fd033 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -94,6 +94,7 @@ void mlock_vma_page(struct page *page)
 			putback_lru_page(page);
 	}
 }
+EXPORT_SYMBOL_GPL(mlock_vma_page);
 
 /*
  * Isolate a page from LRU with optional get_page() pin.
@@ -211,6 +212,7 @@ unsigned int munlock_vma_page(struct page *page)
 out:
 	return nr_pages - 1;
 }
+EXPORT_SYMBOL_GPL(munlock_vma_page);
 
 /*
  * convert get_user_pages() return value to posix mlock() error
-- 
2.14.1