[PATCH mm-unstable v1 09/20] mm/gup: reliable R/O long-term pinning in COW mappings

John Hubbard jhubbard at nvidia.com
Thu Nov 24 01:29:50 UTC 2022


On 11/16/22 02:26, David Hildenbrand wrote:
...
> With this change, the new R/O long-term pinning tests for non-anonymous
> memory succeed:
>    # [RUN] R/O longterm GUP pin ... with shared zeropage
>    ok 151 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP pin ... with memfd
>    ok 152 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP pin ... with tmpfile
>    ok 153 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP pin ... with huge zeropage
>    ok 154 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP pin ... with memfd hugetlb (2048 kB)
>    ok 155 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP pin ... with memfd hugetlb (1048576 kB)
>    ok 156 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP-fast pin ... with shared zeropage
>    ok 157 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP-fast pin ... with memfd
>    ok 158 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP-fast pin ... with tmpfile
>    ok 159 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP-fast pin ... with huge zeropage
>    ok 160 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP-fast pin ... with memfd hugetlb (2048 kB)
>    ok 161 Longterm R/O pin is reliable
>    # [RUN] R/O longterm GUP-fast pin ... with memfd hugetlb (1048576 kB)
>    ok 162 Longterm R/O pin is reliable

Yes. I was able to reproduce these results, after some minor distractions
involving huge pages (don't ask). :)

> 
> Note 1: We don't care about short-term R/O pinning, because such pins have
> snapshot semantics: they are not supposed to observe modifications that
> happen after pinning.
> 
> As one example, assume we start direct I/O to read from a page and store
> page content into a file: modifications to page content after starting
> direct I/O are not guaranteed to end up in the file. So even if we'd pin
> the shared zeropage, the end result would be as expected -- getting zeroes
> stored to the file.
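
To spell out the "snapshot semantics" point for anyone reading along: below
is a minimal sketch of what such a short-term R/O pin looks like on the
caller side (kernel context, assumes <linux/mm.h>; the buffer and variable
names are made up for illustration and are not taken from this patch):

	/* Short-term R/O pin, e.g. to read the buffer contents via direct I/O. */
	struct page *pages[16];
	int npinned;

	/* No FOLL_LONGTERM, no FOLL_WRITE: we only need a snapshot. */
	npinned = pin_user_pages_fast(user_addr, 16, 0, pages);
	if (npinned < 0)
		return npinned;

	/* ... issue and wait for the I/O that reads from these pages ... */

	/* Userspace writes after pin time need not end up in the file. */
	unpin_user_pages(pages, npinned);

Pinning the shared zeropage in such a path is harmless, because the I/O is
only required to see the page contents as of pin time -- which is exactly
the "zeroes stored to the file" outcome described above.
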
> 
> Note 2: For shared mappings, we'll now always fall back to the slow path to
> look up the VMA when R/O long-term pinning. While that's the necessary price
> we have to pay right now, it's actually not that bad in practice: most
> FOLL_LONGTERM users already specify FOLL_WRITE, for example, along with
> FOLL_FORCE, because they tried dealing with COW mappings correctly ...
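
To make the "most users already pass FOLL_WRITE" point a bit more concrete,
this is roughly what the two caller patterns look like (a sketch with
made-up variable names, not lifted from any particular driver):

	/*
	 * The common long-term user (e.g. pinning buffers for DMA): a
	 * writable pin, often combined with FOLL_FORCE as noted above.
	 */
	ret = pin_user_pages_fast(user_addr, nr_pages,
				  FOLL_WRITE | FOLL_LONGTERM, pages);

	/*
	 * Pure R/O long-term pin -- the case this patch makes reliable.
	 * Without the VMA, GUP-fast cannot tell whether a non-anonymous
	 * page belongs to a COW mapping, so it backs off and the slow
	 * path looks up the VMA and checks is_cow_mapping().
	 */
	ret = pin_user_pages_fast(user_addr, nr_pages,
				  FOLL_LONGTERM, pages);

Either way the fallback stays internal to GUP, so callers don't need any
changes.
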
> 
> Note 3: For users that use FOLL_LONGTERM right now without FOLL_WRITE,
> such as VFIO, we'd now no longer pin the shared zeropage. Instead, we'd
> populate exclusive anon pages that we can pin. There was a concern that
> this could affect the memlock limit of existing setups.
> 
> For example, a VM running with VFIO could run into the memlock limit and
> fail to run. However, we essentially had the same behavior already in
> commit 17839856fd58 ("gup: document and work around "COW can break either
> way" issue"), which got merged into some enterprise distros, and there were
> no such complaints. So most probably, we're fine.
> 
> Signed-off-by: David Hildenbrand <david at redhat.com>
> ---
>   include/linux/mm.h | 27 ++++++++++++++++++++++++---
>   mm/gup.c           | 10 +++++-----
>   mm/huge_memory.c   |  2 +-
>   mm/hugetlb.c       |  7 ++++---
>   4 files changed, 34 insertions(+), 12 deletions(-)
> 

Looks good,

Reviewed-by: John Hubbard <jhubbard at nvidia.com>

thanks,
-- 
John Hubbard
NVIDIA

> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 6bd2ee5872dd..e8cc838f42f9 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -3095,8 +3095,12 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
>    * Must be called with the (sub)page that's actually referenced via the
>    * page table entry, which might not necessarily be the head page for a
>    * PTE-mapped THP.
> + *
> + * If the vma is NULL, we're coming from the GUP-fast path and might have
> + * to fall back to the slow path just to look up the vma.
>    */
> -static inline bool gup_must_unshare(unsigned int flags, struct page *page)
> +static inline bool gup_must_unshare(struct vm_area_struct *vma,
> +				    unsigned int flags, struct page *page)
>   {
>   	/*
>   	 * FOLL_WRITE is implicitly handled correctly as the page table entry
> @@ -3109,8 +3113,25 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
>   	 * Note: PageAnon(page) is stable until the page is actually getting
>   	 * freed.
>   	 */
> -	if (!PageAnon(page))
> -		return false;
> +	if (!PageAnon(page)) {
> +		/*
> +		 * We only care about R/O long-term pinning: R/O short-term
> +		 * pinning does not have the semantics to observe successive
> +		 * changes through the process page tables.
> +		 */
> +		if (!(flags & FOLL_LONGTERM))
> +			return false;
> +
> +		/* We really need the vma ... */
> +		if (!vma)
> +			return true;
> +
> +		/*
> +		 * ... because we only care about writable private ("COW")
> +		 * mappings where we have to break COW early.
> +		 */
> +		return is_cow_mapping(vma->vm_flags);
> +	}
>   
>   	/* Paired with a memory barrier in page_try_share_anon_rmap(). */
>   	if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
> diff --git a/mm/gup.c b/mm/gup.c
> index 5182abaaecde..01116699c863 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -578,7 +578,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
>   		}
>   	}
>   
> -	if (!pte_write(pte) && gup_must_unshare(flags, page)) {
> +	if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
>   		page = ERR_PTR(-EMLINK);
>   		goto out;
>   	}
> @@ -2338,7 +2338,7 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
>   			goto pte_unmap;
>   		}
>   
> -		if (!pte_write(pte) && gup_must_unshare(flags, page)) {
> +		if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
>   			gup_put_folio(folio, 1, flags);
>   			goto pte_unmap;
>   		}
> @@ -2506,7 +2506,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
>   		return 0;
>   	}
>   
> -	if (!pte_write(pte) && gup_must_unshare(flags, &folio->page)) {
> +	if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
>   		gup_put_folio(folio, refs, flags);
>   		return 0;
>   	}
> @@ -2572,7 +2572,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
>   		return 0;
>   	}
>   
> -	if (!pmd_write(orig) && gup_must_unshare(flags, &folio->page)) {
> +	if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
>   		gup_put_folio(folio, refs, flags);
>   		return 0;
>   	}
> @@ -2612,7 +2612,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
>   		return 0;
>   	}
>   
> -	if (!pud_write(orig) && gup_must_unshare(flags, &folio->page)) {
> +	if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
>   		gup_put_folio(folio, refs, flags);
>   		return 0;
>   	}
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 68d00196b519..dec7a7c0eca8 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1434,7 +1434,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
>   	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
>   		return NULL;
>   
> -	if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
> +	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
>   		return ERR_PTR(-EMLINK);
>   
>   	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 383b26069b33..c3aab6d5b7aa 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6195,7 +6195,8 @@ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
>   	}
>   }
>   
> -static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte,
> +static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
> +					       unsigned int flags, pte_t *pte,
>   					       bool *unshare)
>   {
>   	pte_t pteval = huge_ptep_get(pte);
> @@ -6207,7 +6208,7 @@ static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte,
>   		return false;
>   	if (flags & FOLL_WRITE)
>   		return true;
> -	if (gup_must_unshare(flags, pte_page(pteval))) {
> +	if (gup_must_unshare(vma, flags, pte_page(pteval))) {
>   		*unshare = true;
>   		return true;
>   	}
> @@ -6336,7 +6337,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
>   		 * directly from any kind of swap entries.
>   		 */
>   		if (absent ||
> -		    __follow_hugetlb_must_fault(flags, pte, &unshare)) {
> +		    __follow_hugetlb_must_fault(vma, flags, pte, &unshare)) {
>   			vm_fault_t ret;
>   			unsigned int fault_flags = 0;
>   


