[PATCH v2 4/6] drm/pagemap: Allocate folios when possible
Francois Dugast
francois.dugast at intel.com
Fri Jul 25 15:39:28 UTC 2025
If the order is greater than zero, allocate a folio when populating the
RAM PFNs instead of allocating individual pages one after the other. For
example, if 2MB folios are used instead of 4KB pages, this reduces the
number of calls to the allocation API by a factor of 512.
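As a rough illustration (not the driver code itself; "order", "vas", "addr",
"i" and "mpfn" stand for values the populate loop already has in scope), the
allocation step becomes:

	struct folio *folio;
	struct page *page;

	/* One call covers 1 << order base pages (order 9 == 2MB with 4KB pages). */
	if (vas)
		folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
	else
		folio = folio_alloc(GFP_HIGHUSER, order);
	if (!folio)
		return -ENOMEM;	/* the real code unwinds via free_pages */

	/* The migrate PFN array keeps tracking the head page of the folio. */
	page = folio_page(folio, 0);
	mpfn[i] = migrate_pfn(page_to_pfn(page));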
v2:
- Use the page order instead of an extra argument (Matthew Brost)
- Allocate with folio_alloc() (Matthew Brost)
- Loop for mpages and free_pages based on order (Matthew Brost), as sketched below
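The order-based iteration in the last item can be pictured with this minimal
sketch of the mpages counting loop (not the patch itself, and assuming the
NR_PAGES(order) helper used by the patch expands to 1UL << order):

	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);
		unsigned int order = page ? folio_order(page_folio(page)) : 0;

		if (page)
			*mpages += NR_PAGES(order);

		/* Step over the whole folio; order 0 still advances by one. */
		i += NR_PAGES(order);
	}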
Signed-off-by: Francois Dugast <francois.dugast at intel.com>
Cc: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/drm_pagemap.c | 38 ++++++++++++++++++++++++++---------
1 file changed, 28 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index b6f5d3a8e445..38753ce2bca7 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -458,54 +458,72 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
 {
 	unsigned long i;
 
-	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
+	for (i = 0; i < npages;) {
 		struct page *page, *src_page;
+		unsigned int order;
+		struct folio *folio;
 
 		if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
-			continue;
+			goto next;
 
 		src_page = migrate_pfn_to_page(src_mpfn[i]);
 		if (!src_page)
-			continue;
+			goto next;
 
 		if (fault_page) {
 			if (src_page->zone_device_data !=
 			    fault_page->zone_device_data)
-				continue;
+				goto next;
 		}
 
+		order = folio_order(page_folio(src_page));
+
+		/* TODO: Support fallback to single pages if THP allocation fails */
 		if (vas)
-			page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+			folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
 		else
-			page = alloc_page(GFP_HIGHUSER);
+			folio = folio_alloc(GFP_HIGHUSER, order);
 
-		if (!page)
+		if (!folio)
 			goto free_pages;
 
+		page = folio_page(folio, 0);
 		mpfn[i] = migrate_pfn(page_to_pfn(page));
+
+next:
+		addr += page_size(page);
+		i += NR_PAGES(order);
 	}
 
-	for (i = 0; i < npages; ++i) {
+	for (i = 0; i < npages;) {
 		struct page *page = migrate_pfn_to_page(mpfn[i]);
+		unsigned int order;
 
 		if (!page)
 			continue;
 
 		WARN_ON_ONCE(!trylock_page(page));
-		++*mpages;
+
+		order = folio_order(page_folio(page));
+		*mpages += NR_PAGES(order);
+		i += NR_PAGES(order);
 	}
 
 	return 0;
 
 free_pages:
-	for (i = 0; i < npages; ++i) {
+	for (i = 0; i < npages;) {
 		struct page *page = migrate_pfn_to_page(mpfn[i]);
+		unsigned int order;
 
 		if (!page)
 			continue;
 
 		put_page(page);
 		mpfn[i] = 0;
+
+		order = folio_order(page_folio(page));
+		i += NR_PAGES(order);
 	}
 	return -ENOMEM;
 }
--
2.43.0