[PATCH v4 23/33] drm/xe: Add drm_pagemap ops to SVM

Matthew Auld matthew.auld at intel.com
Thu Jan 30 10:54:46 UTC 2025


On 29/01/2025 19:52, Matthew Brost wrote:
> From: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> 
> Add support for mapping device pages to Xe SVM by attaching drm_pagemap
> to a memory region, which is then linked to a GPU SVM devmem allocation.
> This enables GPU SVM to derive the device page address.
> 
> v3:
>   - Better commit message (Thomas)
>   - New drm_pagemap.h location
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Reviewed-by: Matthew Brost <matthew.brost at intel.com>
> ---
>   drivers/gpu/drm/xe/xe_device_types.h |  6 ++++++
>   drivers/gpu/drm/xe/xe_svm.c          | 31 ++++++++++++++++++++++++++++
>   2 files changed, 37 insertions(+)
> 
> diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> index da5bf145324b..eb3702db5c17 100644
> --- a/drivers/gpu/drm/xe/xe_device_types.h
> +++ b/drivers/gpu/drm/xe/xe_device_types.h
> @@ -10,6 +10,7 @@
>   
>   #include <drm/drm_device.h>
>   #include <drm/drm_file.h>
> +#include <drm/drm_pagemap.h>
>   #include <drm/ttm/ttm_device.h>
>   
>   #include "xe_devcoredump_types.h"
> @@ -106,6 +107,11 @@ struct xe_mem_region {
>   	void __iomem *mapping;
>   	/** @pagemap: Used to remap device memory as ZONE_DEVICE */
>   	struct dev_pagemap pagemap;
> +	/**
> +	 * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
> +	 * pages of this tile.
> +	 */
> +	struct drm_pagemap dpagemap;
>   	/**
>   	 * @hpa_base: base host physical address
>   	 *
> diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
> index 985ac20c5b07..869a155fc9f7 100644
> --- a/drivers/gpu/drm/xe/xe_svm.c
> +++ b/drivers/gpu/drm/xe/xe_svm.c
> @@ -450,6 +450,33 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
>   }
>   
>   #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
> +static struct drm_pagemap_dma_addr
> +xe_drm_pagemap_map_dma(struct drm_pagemap *dpagemap,
> +		       struct device *dev,
> +		       struct page *page,
> +		       unsigned int order,
> +		       enum dma_data_direction dir)
> +{
> +	struct device *pgmap_dev = dpagemap->dev;
> +	enum drm_interconnect_protocol prot;
> +	dma_addr_t addr;
> +
> +	if (pgmap_dev == dev) {
> +		addr = xe_mem_region_page_to_dpa(page_to_mr(page), page);
> +		prot = XE_INTERCONNECT_VRAM;
> +	} else {
> +		addr = DMA_MAPPING_ERROR;
> +		prot = 0;
> +	}
> +
> +	return drm_pagemap_dma_addr_encode(addr, prot, order, dir);
> +}
> +
> +static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
> +	.map_dma = xe_drm_pagemap_map_dma,
> +};
> +
> +>>>>>>> 133db8ade5f0 (drm/xe: Add drm_pagemap ops to SVM)

Looks like some leftover rebase damage here — the stray `>>>>>>>` conflict marker above should be removed.

>   /**
>    * xe_devm_add: Remap and provide memmap backing for device memory
>    * @tile: tile that the memory region belongs to
> @@ -482,6 +509,10 @@ int xe_devm_add(struct xe_tile *tile, struct xe_mem_region *mr)
>   	mr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
>   	mr->pagemap.owner = xe_svm_devm_owner(xe);
>   	addr = devm_memremap_pages(dev, &mr->pagemap);
> +
> +	mr->dpagemap.dev = dev;
> +	mr->dpagemap.ops = &xe_drm_pagemap_ops;
> +
>   	if (IS_ERR(addr)) {
>   		devm_release_mem_region(dev, res->start, resource_size(res));
>   		ret = PTR_ERR(addr);



More information about the Intel-xe mailing list