[PATCH hmm 2/8] mm/hmm: don't free the cached pgmap while scanning
Christoph Hellwig
hch at lst.de
Mon Mar 16 18:13:24 UTC 2020
On Mon, Mar 16, 2020 at 03:07:13PM -0300, Jason Gunthorpe wrote:
> I chose this to be simple, without having to use a goto to unwind it.
>
> So, instead like this:
As said, and per the previous discussion: I think just removing the
pgmap lookup is the right thing to do here. Something like this patch:
diff --git a/mm/hmm.c b/mm/hmm.c
index 3d10485bf323..9f1049815d44 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -28,7 +28,6 @@
 
 struct hmm_vma_walk {
 	struct hmm_range	*range;
-	struct dev_pagemap	*pgmap;
 	unsigned long		last;
 	unsigned int		flags;
 };
@@ -198,15 +197,8 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 		return hmm_vma_fault(addr, end, fault, write_fault, walk);
 
 	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
-		if (pmd_devmap(pmd)) {
-			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap))
-				return -EBUSY;
-		}
+	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
-	}
 	hmm_vma_walk->last = end;
 	return 0;
 }
@@ -277,15 +269,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	if (fault || write_fault)
 		goto fault;
 
-	if (pte_devmap(pte)) {
-		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
-					      hmm_vma_walk->pgmap);
-		if (unlikely(!hmm_vma_walk->pgmap)) {
-			pte_unmap(ptep);
-			return -EBUSY;
-		}
-	}
-
 	/*
 	 * Since each architecture defines a struct page for the zero page, just
 	 * fall through and treat it like a normal page.
@@ -455,12 +438,6 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 
 		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 		for (i = 0; i < npages; ++i, ++pfn) {
-			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap)) {
-				ret = -EBUSY;
-				goto out_unlock;
-			}
 			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
 				  cpu_flags;
 		}
@@ -614,15 +591,6 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 			return -EBUSY;
 		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
 				      &hmm_walk_ops, &hmm_vma_walk);
-		/*
-		 * A pgmap is kept cached in the hmm_vma_walk to avoid expensive
-		 * searching in the probably common case that the pgmap is the
-		 * same for the entire requested range.
-		 */
-		if (hmm_vma_walk.pgmap) {
-			put_dev_pagemap(hmm_vma_walk.pgmap);
-			hmm_vma_walk.pgmap = NULL;
-		}
 	} while (ret == -EBUSY);
 
 	if (ret)
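
For reference, all the removed hunks lean on the same caching contract
of get_dev_pagemap(): when passed the previously returned pgmap it is
reused if the pfn still falls inside its range, otherwise that
reference is dropped and a reference is taken on whichever pgmap does
cover the pfn.  A rough standalone sketch of that contract (a
simplified illustration, not the kernel implementation; pgmap_lookup()
and example_pgmap are hypothetical stand-ins, and the plain counter
stands in for the real percpu_ref):

struct dev_pagemap {
	unsigned long start_pfn;	/* first pfn covered */
	unsigned long nr_pages;		/* number of pfns covered */
	unsigned long refcount;		/* stand-in for the percpu_ref */
};

/*
 * Hypothetical stand-in for the kernel's pfn -> pgmap lookup; the real
 * kernel resolves this through the memmap.  One static entry for demo.
 */
static struct dev_pagemap example_pgmap = {
	.start_pfn	= 0x1000,
	.nr_pages	= 512,
	.refcount	= 1,
};

static struct dev_pagemap *pgmap_lookup(unsigned long pfn)
{
	if (pfn >= example_pgmap.start_pfn &&
	    pfn < example_pgmap.start_pfn + example_pgmap.nr_pages)
		return &example_pgmap;
	return NULL;
}

static void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		pgmap->refcount--;
}

struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
				    struct dev_pagemap *pgmap)
{
	/* Fast path: the cached pgmap still covers this pfn, reuse it. */
	if (pgmap && pfn >= pgmap->start_pfn &&
	    pfn < pgmap->start_pfn + pgmap->nr_pages)
		return pgmap;

	/* Miss: drop the cached reference, take one on the new pgmap. */
	put_dev_pagemap(pgmap);
	pgmap = pgmap_lookup(pfn);
	if (pgmap)
		pgmap->refcount++;
	return pgmap;
}

And per the earlier discussion, the walker does not actually need to
pin the pgmap at all: a pgmap cannot be torn down while its pages are
still mapped, and tearing it down unmaps those pages and fires the mmu
notifiers, which invalidates the range and forces hmm_range_fault() to
retry anyway.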