[PATCH v1 2/5] drm/xe/svm: Use struct drm_pagemap_device_addr
Matthew Brost
matthew.brost at intel.com
Fri Jul 18 04:12:52 UTC 2025
On Thu, Jul 17, 2025 at 03:38:24PM +0200, Francois Dugast wrote:
> Adapt to the new drm_gpusvm_devmem_ops type signatures using struct
> drm_pagemap_device_addr, as well as the internal xe SVM functions
> implementing those operations.
>
> In addition, the use of drm_gpusvm_dma_info is propagated to xe_migrate
> because it makes indexed accesses to the next DMA address, and those
> addresses are no longer contiguous.
>
> There is no functional change.
>
Again, combine this with the previous patch.
Same comment about device_addr vs. pagemap_addr as on the previous patch.
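For reference, the new type roughly bundles the dma address with its
interconnect metadata, along these lines (a sketch of the fields it carries,
not the exact drm_pagemap.h layout):

struct drm_pagemap_device_addr {
	dma_addr_t addr;			/* dma-mapped or device address */
	enum drm_interconnect_protocol proto;	/* e.g. DRM_INTERCONNECT_SYSTEM */
	unsigned int order;			/* page order of the mapping */
	enum dma_data_direction dir;		/* DMA direction of the mapping */
};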
> Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>
> ---
> drivers/gpu/drm/xe/xe_migrate.c | 55 ++++++++++++++++++---------------
> drivers/gpu/drm/xe/xe_migrate.h | 5 +--
> drivers/gpu/drm/xe/xe_svm.c | 37 ++++++++++++----------
> 3 files changed, 54 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index 6a80ae6104dd..33d67993f1f6 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -9,6 +9,7 @@
> #include <linux/sizes.h>
>
> #include <drm/drm_managed.h>
> +#include <drm/drm_pagemap.h>
> #include <drm/ttm/ttm_tt.h>
> #include <uapi/drm/xe_drm.h>
>
> @@ -1585,7 +1586,8 @@ static u32 pte_update_cmd_size(u64 size)
>
> static void build_pt_update_batch_sram(struct xe_migrate *m,
> struct xe_bb *bb, u32 pt_offset,
> - dma_addr_t *sram_addr, u32 size)
> + struct drm_pagemap_device_addr *sram_addr,
> + u32 size)
> {
> u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
> u32 ptes;
> @@ -1603,7 +1605,7 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
> ptes -= chunk;
>
> while (chunk--) {
> - u64 addr = sram_addr[i++] & PAGE_MASK;
> + u64 addr = sram_addr[i++].addr & PAGE_MASK;
>
xe_assert(xe, sram_addr[i].proto == DRM_INTERCONNECT_SYSTEM);

Obviously, don't do this after incrementing 'i', but you get the idea -
assert that the address is using the DMA map protocol.
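Roughly something like this, with the check done before 'i' is incremented
(a sketch only, using xe_tile_assert() to match the surrounding code):

	while (chunk--) {
		u64 addr;

		/* SRAM pages here are expected to be dma-mapped system memory */
		xe_tile_assert(m->tile, sram_addr[i].proto == DRM_INTERCONNECT_SYSTEM);

		addr = sram_addr[i++].addr & PAGE_MASK;
		...
	}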
> xe_tile_assert(m->tile, addr);
> addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
> @@ -1626,7 +1628,8 @@ enum xe_migrate_copy_dir {
> static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
> unsigned long len,
> unsigned long sram_offset,
> - dma_addr_t *sram_addr, u64 vram_addr,
> + struct drm_pagemap_device_addr *sram_addr,
> + u64 vram_addr,
> const enum xe_migrate_copy_dir dir)
> {
> struct xe_gt *gt = m->tile->primary_gt;
> @@ -1708,7 +1711,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
> * xe_migrate_to_vram() - Migrate to VRAM
> * @m: The migration context.
> * @npages: Number of pages to migrate.
> - * @src_addr: Array of dma addresses (source of migrate)
> + * @src_addr: Array of DMA information (source of migrate)
> * @dst_addr: Device physical address of VRAM (destination of migrate)
> *
> * Copy from an array dma addresses to a VRAM device physical address
> @@ -1718,7 +1721,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
> */
> struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
> unsigned long npages,
> - dma_addr_t *src_addr,
> + struct drm_pagemap_device_addr *src_addr,
> u64 dst_addr)
> {
> return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
> @@ -1730,7 +1733,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
> * @m: The migration context.
> * @npages: Number of pages to migrate.
> * @src_addr: Device physical address of VRAM (source of migrate)
> - * @dst_addr: Array of dma addresses (destination of migrate)
> + * @dst_addr: Array of DMA information (destination of migrate)
> *
> * Copy from a VRAM device physical address to an array dma addresses
> *
> @@ -1740,35 +1743,37 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
> struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
> unsigned long npages,
> u64 src_addr,
> - dma_addr_t *dst_addr)
> + struct drm_pagemap_device_addr *dst_addr)
> {
> return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
> XE_MIGRATE_COPY_TO_SRAM);
> }
>
> -static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
> +static void xe_migrate_dma_unmap(struct xe_device *xe,
> + struct drm_pagemap_device_addr *device_addr,
> int len, int write)
> {
> unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
>
> for (i = 0; i < npages; ++i) {
> - if (!dma_addr[i])
> + if (!device_addr[i].addr)
> break;
>
> - dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
> + dma_unmap_page(xe->drm.dev, device_addr[i].addr, PAGE_SIZE,
> write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
> }
> - kfree(dma_addr);
> + kfree(device_addr);
> }
>
> -static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
> - void *buf, int len, int write)
> +static struct drm_pagemap_device_addr *xe_migrate_dma_map(struct xe_device *xe,
> + void *buf, int len,
> + int write)
> {
> - dma_addr_t *dma_addr;
> + struct drm_pagemap_device_addr *device_addr;
> unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
>
> - dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
> - if (!dma_addr)
> + device_addr = kcalloc(npages, sizeof(*device_addr), GFP_KERNEL);
> + if (!device_addr)
> return ERR_PTR(-ENOMEM);
>
> for (i = 0; i < npages; ++i) {
> @@ -1787,14 +1792,14 @@ static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
> if (dma_mapping_error(xe->drm.dev, addr))
> goto err_fault;
>
> - dma_addr[i] = addr;
> + device_addr[i].addr = addr;
Set proto, dir for clarity and completeness.
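Something along these lines, perhaps (a sketch; DRM_INTERCONNECT_SYSTEM for the
protocol since these are dma_map_page() mappings, and the direction mirroring
the unmap path above):

		device_addr[i].addr = addr;
		device_addr[i].proto = DRM_INTERCONNECT_SYSTEM;
		device_addr[i].dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;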
Matt
> buf += PAGE_SIZE;
> }
>
> - return dma_addr;
> + return device_addr;
>
> err_fault:
> - xe_migrate_dma_unmap(xe, dma_addr, len, write);
> + xe_migrate_dma_unmap(xe, device_addr, len, write);
> return ERR_PTR(-EFAULT);
> }
>
> @@ -1823,7 +1828,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
> struct xe_device *xe = tile_to_xe(tile);
> struct xe_res_cursor cursor;
> struct dma_fence *fence = NULL;
> - dma_addr_t *dma_addr;
> + struct drm_pagemap_device_addr *device_addr;
> unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
> int bytes_left = len, current_page = 0;
> void *orig_buf = buf;
> @@ -1878,9 +1883,9 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
> return 0;
> }
>
> - dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
> - if (IS_ERR(dma_addr))
> - return PTR_ERR(dma_addr);
> + device_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
> + if (IS_ERR(device_addr))
> + return PTR_ERR(device_addr);
>
> xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
>
> @@ -1901,7 +1906,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
>
> __fence = xe_migrate_vram(m, current_bytes,
> (unsigned long)buf & ~PAGE_MASK,
> - dma_addr + current_page,
> + &device_addr[current_page],
> vram_addr, write ?
> XE_MIGRATE_COPY_TO_VRAM :
> XE_MIGRATE_COPY_TO_SRAM);
> @@ -1925,7 +1930,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
> dma_fence_put(fence);
>
> out_err:
> - xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
> + xe_migrate_dma_unmap(xe, device_addr, len + page_offset, write);
> return IS_ERR(fence) ? PTR_ERR(fence) : 0;
> }
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
> index 74c60f55004a..f8bff3f2904a 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.h
> +++ b/drivers/gpu/drm/xe/xe_migrate.h
> @@ -9,6 +9,7 @@
> #include <linux/types.h>
>
> struct dma_fence;
> +struct drm_pagemap_device_addr;
> struct iosys_map;
> struct ttm_resource;
>
> @@ -98,13 +99,13 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
>
> struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
> unsigned long npages,
> - dma_addr_t *src_addr,
> + struct drm_pagemap_device_addr *src_addr,
> u64 dst_addr);
>
> struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
> unsigned long npages,
> u64 src_addr,
> - dma_addr_t *dst_addr);
> + struct drm_pagemap_device_addr *dst_addr);
>
> struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
> struct xe_bo *src_bo,
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 10c8a1bcb86e..80dfb96d5ca4 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -328,7 +328,8 @@ enum xe_svm_copy_dir {
> XE_SVM_COPY_TO_SRAM,
> };
>
> -static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
> +static int xe_svm_copy(struct page **pages,
> + struct drm_pagemap_device_addr *device_addr,
> unsigned long npages, const enum xe_svm_copy_dir dir)
> {
> struct xe_vram_region *vr = NULL;
> @@ -360,7 +361,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
> last = (i + 1) == npages;
>
> /* No CPU page and no device pages queue'd to copy */
> - if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
> + if (!device_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
> continue;
>
> if (!vr && spage) {
> @@ -374,7 +375,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
> * first device page, check if physical contiguous on subsequent
> * device pages.
> */
> - if (dma_addr[i] && spage) {
> + if (device_addr[i].addr && spage) {
> __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
> if (vram_addr == XE_VRAM_ADDR_INVALID) {
> vram_addr = __vram_addr;
> @@ -399,18 +400,20 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
> if (sram) {
> vm_dbg(&xe->drm,
> "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
> - vram_addr, (u64)dma_addr[pos], i - pos + incr);
> + vram_addr,
> + (u64)device_addr[pos].addr, i - pos + incr);
> __fence = xe_migrate_from_vram(vr->migrate,
> i - pos + incr,
> vram_addr,
> - dma_addr + pos);
> + &device_addr[pos]);
> } else {
> vm_dbg(&xe->drm,
> "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
> - (u64)dma_addr[pos], vram_addr, i - pos + incr);
> + (u64)device_addr[pos].addr, vram_addr,
> + i - pos + incr);
> __fence = xe_migrate_to_vram(vr->migrate,
> i - pos + incr,
> - dma_addr + pos,
> + &device_addr[pos],
> vram_addr);
> }
> if (IS_ERR(__fence)) {
> @@ -423,7 +426,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
> }
>
> /* Setup physical address of next device page */
> - if (dma_addr[i] && spage) {
> + if (device_addr[i].addr && spage) {
> vram_addr = __vram_addr;
> pos = i;
> } else {
> @@ -435,16 +438,16 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
> if (sram) {
> vm_dbg(&xe->drm,
> "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
> - vram_addr, (u64)dma_addr[pos], 1);
> + vram_addr, (u64)device_addr[pos].addr, 1);
> __fence = xe_migrate_from_vram(vr->migrate, 1,
> vram_addr,
> - dma_addr + pos);
> + &device_addr[pos]);
> } else {
> vm_dbg(&xe->drm,
> "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
> - (u64)dma_addr[pos], vram_addr, 1);
> + (u64)device_addr[pos].addr, vram_addr, 1);
> __fence = xe_migrate_to_vram(vr->migrate, 1,
> - dma_addr + pos,
> + &device_addr[pos],
> vram_addr);
> }
> if (IS_ERR(__fence)) {
> @@ -470,16 +473,18 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
> #undef XE_VRAM_ADDR_INVALID
> }
>
> -static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
> +static int xe_svm_copy_to_devmem(struct page **pages,
> + struct drm_pagemap_device_addr *device_addr,
> unsigned long npages)
> {
> - return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
> + return xe_svm_copy(pages, device_addr, npages, XE_SVM_COPY_TO_VRAM);
> }
>
> -static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
> +static int xe_svm_copy_to_ram(struct page **pages,
> + struct drm_pagemap_device_addr *device_addr,
> unsigned long npages)
> {
> - return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
> + return xe_svm_copy(pages, device_addr, npages, XE_SVM_COPY_TO_SRAM);
> }
>
> static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
> --
> 2.43.0
>