[PATCH v1 1/5] drm/pagemap: Use struct drm_pagemap_device_addr in mapping and copy functions
Matthew Brost
matthew.brost at intel.com
Fri Jul 18 04:03:18 UTC 2025
On Thu, Jul 17, 2025 at 03:38:23PM +0200, Francois Dugast wrote:
> This struct embeds more information than just the DMA address. This will help
> later to support folio orders greater than zero. At this point, there is no
> functional change as the only struct member used is addr.
>
This patch alone will break the build; you'll need to squash it with
the next patch to keep the series bisectable.
> Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/drm_pagemap.c | 58 +++++++++++++++++------------------
> include/drm/drm_pagemap.h | 8 ++---
> 2 files changed, 33 insertions(+), 33 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
> index 1da55322af12..0ed66aaade14 100644
> --- a/drivers/gpu/drm/drm_pagemap.c
> +++ b/drivers/gpu/drm/drm_pagemap.c
> @@ -202,7 +202,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
> /**
> * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
> * @dev: The device for which the pages are being mapped
> - * @dma_addr: Array to store DMA addresses corresponding to mapped pages
> + * @device_addr: Array to store DMA information corresponding to mapped pages
> * @migrate_pfn: Array of migrate page frame numbers to map
> * @npages: Number of pages to map
> * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
> @@ -215,7 +215,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
> * Returns: 0 on success, -EFAULT if an error occurs during mapping.
> */
> static int drm_pagemap_migrate_map_pages(struct device *dev,
> - dma_addr_t *dma_addr,
> + struct drm_pagemap_device_addr *device_addr,
I like the change to drm_pagemap_device_addr, and I think it fits the
patch, but what's stored here isn't actually a device address: it's a
DMA mapping of CPU memory. Originally, drm_pagemap_device_addr was
intended to represent a device memory address shared between devices.
That said, I think it still works for our purpose here.
So, I suggest we rename it:
s/drm_pagemap_device_addr/drm_pagemap_addr
And for the variable:
s/device_addr/pagemap_addr
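With that rename, the struct would read something like this (just a
sketch; I'm assuming the field layout currently in drm_pagemap.h
carries over unchanged):

struct drm_pagemap_addr {
	dma_addr_t addr;	/* DMA address, or driver-private address */
	u64 proto : 54;		/* enum drm_interconnect_protocol */
	u64 order : 8;		/* page order of the mapping */
	u64 dir : 2;		/* enum dma_data_direction */
};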
Thomas is back Monday; perhaps get his input on this too.
> unsigned long *migrate_pfn,
> unsigned long npages,
> enum dma_data_direction dir)
> @@ -231,8 +231,8 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
> if (WARN_ON_ONCE(is_zone_device_page(page)))
> return -EFAULT;
>
> - dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
> - if (dma_mapping_error(dev, dma_addr[i]))
> + device_addr[i].addr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
I'd explicitly set proto to DRM_INTERCONNECT_SYSTEM here (even though
it's zero) for clarity, and also set dir for completeness.
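Something like this (a sketch of the loop body; the error handling
below stays as-is):

	device_addr[i].addr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
	device_addr[i].proto = DRM_INTERCONNECT_SYSTEM;
	device_addr[i].dir = dir;

DRM_INTERCONNECT_SYSTEM is zero, so the kvcalloc()ed buffer already
encodes it implicitly, but spelling it out documents the intent.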
Matt
> + if (dma_mapping_error(dev, device_addr[i].addr))
> return -EFAULT;
> }
>
> @@ -242,7 +242,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
> /**
> * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
> * @dev: The device for which the pages were mapped
> - * @dma_addr: Array of DMA addresses corresponding to mapped pages
> + * @device_addr: Array of DMA information corresponding to mapped pages
> * @npages: Number of pages to unmap
> * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
> *
> @@ -251,17 +251,17 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
> * if it's valid and not already unmapped, and unmaps the corresponding page.
> */
> static void drm_pagemap_migrate_unmap_pages(struct device *dev,
> - dma_addr_t *dma_addr,
> + struct drm_pagemap_device_addr *device_addr,
> unsigned long npages,
> enum dma_data_direction dir)
> {
> unsigned long i;
>
> for (i = 0; i < npages; ++i) {
> - if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
> + if (!device_addr[i].addr || dma_mapping_error(dev, device_addr[i].addr))
> continue;
>
> - dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
> + dma_unmap_page(dev, device_addr[i].addr, PAGE_SIZE, dir);
> }
> }
>
> @@ -314,7 +314,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
> struct vm_area_struct *vas;
> struct drm_pagemap_zdd *zdd = NULL;
> struct page **pages;
> - dma_addr_t *dma_addr;
> + struct drm_pagemap_device_addr *device_addr;
> void *buf;
> int err;
>
> @@ -340,14 +340,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
> goto err_out;
> }
>
> - buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
> + buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*device_addr) +
> sizeof(*pages), GFP_KERNEL);
> if (!buf) {
> err = -ENOMEM;
> goto err_out;
> }
> - dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
> - pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
> + device_addr = buf + (2 * sizeof(*migrate.src) * npages);
> + pages = buf + (2 * sizeof(*migrate.src) + sizeof(*device_addr)) * npages;
>
> zdd = drm_pagemap_zdd_alloc(pgmap_owner);
> if (!zdd) {
> @@ -377,7 +377,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
> if (err)
> goto err_finalize;
>
> - err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
> + err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, device_addr,
> migrate.src, npages, DMA_TO_DEVICE);
> if (err)
> goto err_finalize;
> @@ -390,7 +390,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
> drm_pagemap_get_devmem_page(page, zdd);
> }
>
> - err = ops->copy_to_devmem(pages, dma_addr, npages);
> + err = ops->copy_to_devmem(pages, device_addr, npages);
> if (err)
> goto err_finalize;
>
> @@ -404,7 +404,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
> drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
> migrate_vma_pages(&migrate);
> migrate_vma_finalize(&migrate);
> - drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
> + drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, device_addr, npages,
> DMA_TO_DEVICE);
> err_free:
> if (zdd)
> @@ -509,7 +509,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
> unsigned long npages, mpages = 0;
> struct page **pages;
> unsigned long *src, *dst;
> - dma_addr_t *dma_addr;
> + struct drm_pagemap_device_addr *device_addr;
> void *buf;
> int i, err = 0;
> unsigned int retry_count = 2;
> @@ -520,7 +520,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
> if (!mmget_not_zero(devmem_allocation->mm))
> return -EFAULT;
>
> - buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
> + buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*device_addr) +
> sizeof(*pages), GFP_KERNEL);
> if (!buf) {
> err = -ENOMEM;
> @@ -528,8 +528,8 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
> }
> src = buf;
> dst = buf + (sizeof(*src) * npages);
> - dma_addr = buf + (2 * sizeof(*src) * npages);
> - pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
> + device_addr = buf + (2 * sizeof(*src) * npages);
> + pages = buf + (2 * sizeof(*src) + sizeof(*device_addr)) * npages;
>
> err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
> if (err)
> @@ -544,7 +544,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
> if (err || !mpages)
> goto err_finalize;
>
> - err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
> + err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, device_addr,
> dst, npages, DMA_FROM_DEVICE);
> if (err)
> goto err_finalize;
> @@ -552,7 +552,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
> for (i = 0; i < npages; ++i)
> pages[i] = migrate_pfn_to_page(src[i]);
>
> - err = ops->copy_to_ram(pages, dma_addr, npages);
> + err = ops->copy_to_ram(pages, device_addr, npages);
> if (err)
> goto err_finalize;
>
> @@ -561,7 +561,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
> drm_pagemap_migration_unlock_put_pages(npages, dst);
> migrate_device_pages(src, dst, npages);
> migrate_device_finalize(src, dst, npages);
> - drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
> + drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, device_addr, npages,
> DMA_FROM_DEVICE);
> err_free:
> kvfree(buf);
> @@ -612,7 +612,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
> struct device *dev = NULL;
> unsigned long npages, mpages = 0;
> struct page **pages;
> - dma_addr_t *dma_addr;
> + struct drm_pagemap_device_addr *device_addr;
> unsigned long start, end;
> void *buf;
> int i, err = 0;
> @@ -637,14 +637,14 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
> migrate.end = end;
> npages = npages_in_range(start, end);
>
> - buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
> + buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*device_addr) +
> sizeof(*pages), GFP_KERNEL);
> if (!buf) {
> err = -ENOMEM;
> goto err_out;
> }
> - dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
> - pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
> + device_addr = buf + (2 * sizeof(*migrate.src) * npages);
> + pages = buf + (2 * sizeof(*migrate.src) + sizeof(*device_addr)) * npages;
>
> migrate.vma = vas;
> migrate.src = buf;
> @@ -680,7 +680,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
> if (err)
> goto err_finalize;
>
> - err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
> + err = drm_pagemap_migrate_map_pages(dev, device_addr, migrate.dst, npages,
> DMA_FROM_DEVICE);
> if (err)
> goto err_finalize;
> @@ -688,7 +688,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
> for (i = 0; i < npages; ++i)
> pages[i] = migrate_pfn_to_page(migrate.src[i]);
>
> - err = ops->copy_to_ram(pages, dma_addr, npages);
> + err = ops->copy_to_ram(pages, device_addr, npages);
> if (err)
> goto err_finalize;
>
> @@ -698,7 +698,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
> migrate_vma_pages(&migrate);
> migrate_vma_finalize(&migrate);
> if (dev)
> - drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
> + drm_pagemap_migrate_unmap_pages(dev, device_addr, npages,
> DMA_FROM_DEVICE);
> err_free:
> kvfree(buf);
> diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
> index e5f20a1235be..a0c0882eeb8e 100644
> --- a/include/drm/drm_pagemap.h
> +++ b/include/drm/drm_pagemap.h
> @@ -170,7 +170,7 @@ struct drm_pagemap_devmem_ops {
> /**
> * @copy_to_devmem: Copy to device memory (required for migration)
> * @pages: Pointer to array of device memory pages (destination)
> - * @dma_addr: Pointer to array of DMA addresses (source)
> + * @device_addr: Pointer to array of DMA information (source)
> * @npages: Number of pages to copy
> *
> * Copy pages to device memory.
> @@ -178,13 +178,13 @@ struct drm_pagemap_devmem_ops {
> * Return: 0 on success, a negative error code on failure.
> */
> int (*copy_to_devmem)(struct page **pages,
> - dma_addr_t *dma_addr,
> + struct drm_pagemap_device_addr *device_addr,
> unsigned long npages);
>
> /**
> * @copy_to_ram: Copy to system RAM (required for migration)
> * @pages: Pointer to array of device memory pages (source)
> - * @dma_addr: Pointer to array of DMA addresses (destination)
> + * @device_addr: Pointer to array of DMA information (destination)
> * @npages: Number of pages to copy
> *
> * Copy pages to system RAM.
> @@ -192,7 +192,7 @@ struct drm_pagemap_devmem_ops {
> * Return: 0 on success, a negative error code on failure.
> */
> int (*copy_to_ram)(struct page **pages,
> - dma_addr_t *dma_addr,
> + struct drm_pagemap_device_addr *device_addr,
> unsigned long npages);
> };
>
> --
> 2.43.0
>