[PATCH v3 39/41] mm/mlock.c: convert put_page() to put_user_page*()
john.hubbard at gmail.com
john.hubbard at gmail.com
Wed Aug 7 01:33:38 UTC 2019
From: John Hubbard <jhubbard at nvidia.com>
For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().
This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").
Cc: Dan Williams <dan.j.williams at intel.com>
Cc: Daniel Black <daniel at linux.ibm.com>
Cc: Jan Kara <jack at suse.cz>
Cc: Jérôme Glisse <jglisse at redhat.com>
Cc: Matthew Wilcox <willy at infradead.org>
Cc: Mike Kravetz <mike.kravetz at oracle.com>
Signed-off-by: John Hubbard <jhubbard at nvidia.com>
---
mm/mlock.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index a90099da4fb4..b980e6270e8a 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -345,7 +345,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
get_page(page); /* for putback_lru_page() */
__munlock_isolated_page(page);
unlock_page(page);
- put_page(page); /* from follow_page_mask() */
+ put_user_page(page); /* from follow_page_mask() */
}
}
}
@@ -467,7 +467,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
if (page && !IS_ERR(page)) {
if (PageTransTail(page)) {
VM_BUG_ON_PAGE(PageMlocked(page), page);
- put_page(page); /* follow_page_mask() */
+ put_user_page(page); /* follow_page_mask() */
} else if (PageTransHuge(page)) {
lock_page(page);
/*
@@ -478,7 +478,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
*/
page_mask = munlock_vma_page(page);
unlock_page(page);
- put_page(page); /* follow_page_mask() */
+ put_user_page(page); /* follow_page_mask() */
} else {
/*
* Non-huge pages are handled in batches via
--
2.22.0
More information about the dri-devel
mailing list