[PATCH v2 3/6] drm/pagemap: DMA map folios when possible
Francois Dugast
francois.dugast at intel.com
Fri Jul 25 15:39:27 UTC 2025
If the page is part of a higher-order folio, DMA map the whole folio at
once instead of mapping its pages individually. For example, if 2MB
folios are used instead of 4KB pages, this reduces the number of DMA
mappings by a factor of 512.

The folio order (and consequently, the size) is persisted in the struct
drm_pagemap_addr entry so that it is available at the time of unmapping.
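
To illustrate the pattern outside the patch context, here is a minimal
sketch (not part of the patch; the helper name and the plain array of
head pages are hypothetical): one dma_map_page() call covers
page_size(page) bytes, i.e. the whole folio, and the index advances by
1 << order instead of 1, so a 2MB folio (order 9) consumes 512 entries
per mapping.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sketch only: map each folio with a single DMA mapping and walk the
 * page array with a folio-sized stride.
 */
static int map_with_folio_stride(struct device *dev, struct page **pages,
				 unsigned long npages,
				 enum dma_data_direction dir,
				 dma_addr_t *addrs)
{
	unsigned long i = 0;

	while (i < npages) {
		unsigned int order = folio_order(page_folio(pages[i]));

		/* One mapping spans the whole folio. */
		addrs[i] = dma_map_page(dev, pages[i], 0,
					page_size(pages[i]), dir);
		if (dma_mapping_error(dev, addrs[i]))
			return -EFAULT;

		i += 1UL << order;	/* skip the folio's tail pages */
	}

	return 0;
}
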
v2:
- Initialize order variable (Matthew Brost)
- Set proto and dir for completeness (Matthew Brost)
- Do not populate drm_pagemap_addr, document it (Matthew Brost)
- Add and use macro NR_PAGES(order) (Matthew Brost)
Signed-off-by: Francois Dugast <francois.dugast at intel.com>
Cc: Matthew Brost <matthew.brost at intel.com>
---
 drivers/gpu/drm/drm_pagemap.c | 25 ++++++++++++++++++-------
 include/drm/drm_pagemap.h     | 10 ++++++++--
 2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index de320aeed812..b6f5d3a8e445 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -222,22 +222,30 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 {
 	unsigned long i;
 
-	for (i = 0; i < npages; ++i) {
+	for (i = 0; i < npages;) {
 		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
 		dma_addr_t dma_addr;
+		struct folio *folio;
+		unsigned int order = 0;
 
 		if (!page)
-			continue;
+			goto next;
 
 		if (WARN_ON_ONCE(is_zone_device_page(page)))
 			return -EFAULT;
 
-		dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+		folio = page_folio(page);
+		order = folio_order(folio);
+
+		dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
 		if (dma_mapping_error(dev, dma_addr))
 			return -EFAULT;
 
 		pagemap_addr[i] = drm_pagemap_addr_encode(
-			dma_addr, DRM_INTERCONNECT_SYSTEM, 0, dir);
+			dma_addr, DRM_INTERCONNECT_SYSTEM, order, dir);
+
+next:
+		i += NR_PAGES(order);
 	}
 
 	return 0;
@@ -261,11 +269,14 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev,
 {
 	unsigned long i;
 
-	for (i = 0; i < npages; ++i) {
+	for (i = 0; i < npages;) {
 		if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
-			continue;
+			goto next;
+
+		dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
 
-		dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE, dir);
+next:
+		i += NR_PAGES(pagemap_addr[i].order);
 	}
 }
 
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 1d5919a99139..ef18d98dbc7e 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -6,6 +6,8 @@
 #include <linux/hmm.h>
 #include <linux/types.h>
 
+#define NR_PAGES(order) (1U << (order))
+
 struct drm_pagemap;
 struct drm_pagemap_zdd;
 struct device;
@@ -173,7 +175,9 @@ struct drm_pagemap_devmem_ops {
 	 * @pagemap_addr: Pointer to array of DMA information (source)
 	 * @npages: Number of pages to copy
 	 *
-	 * Copy pages to device memory.
+	 * Copy pages to device memory. If the order of a @pagemap_addr entry
+	 * is greater than 0, the entry is populated but subsequent entries
+	 * within the range of that order are not populated.
 	 *
 	 * Return: 0 on success, a negative error code on failure.
 	 */
@@ -187,7 +191,9 @@ struct drm_pagemap_devmem_ops {
 	 * @pagemap_addr: Pointer to array of DMA information (destination)
 	 * @npages: Number of pages to copy
 	 *
-	 * Copy pages to system RAM.
+	 * Copy pages to system RAM. If the order of a @pagemap_addr entry
+	 * is greater than 0, the entry is populated but subsequent entries
+	 * within the range of that order are not populated.
 	 *
 	 * Return: 0 on success, a negative error code on failure.
 	 */
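
Note for implementers of these ops: since only the first entry of each
folio-sized run is populated, a consumer is expected to walk the array
with the same NR_PAGES(order) stride the mapping side used. A sketch of
the expected walk (the helper is hypothetical; the .addr and .order
fields are the ones the patch uses):

/* Sketch, not part of the patch: walk a pagemap_addr array produced by
 * drm_pagemap_migrate_map_pages(). For order > 0 only the first entry
 * of each run carries an address, and that run covers
 * PAGE_SIZE << order bytes.
 */
static void walk_pagemap_addrs(const struct drm_pagemap_addr *addrs,
			       unsigned long npages)
{
	unsigned long i = 0;

	while (i < npages) {
		if (addrs[i].addr) {
			size_t len = PAGE_SIZE << addrs[i].order;

			/* copy 'len' bytes to/from addrs[i].addr here */
			pr_debug("run: %pad, %zu bytes\n",
				 &addrs[i].addr, len);
		}

		i += NR_PAGES(addrs[i].order);
	}
}
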
--
2.43.0