[PATCH] drm/amdgpu: Use iterator methods exposed by amdgpu_res_cursor.h in building SG_TABLE's for a VRAM BO

Christian König ckoenig.leichtzumerken at gmail.com
Tue Apr 13 18:08:18 UTC 2021


On 13.04.21 at 19:17, Ramesh Errabolu wrote:
> Extend the current implementation of the SG_TABLE construction method
> to allow exporting sub-buffers of a VRAM BO. This capability enables
> logical partitioning of a VRAM BO into multiple non-overlapping
> sub-buffers. One example use case is partitioning a VRAM BO into two
> sub-buffers, one for SRC and another for DST.
>
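
Just to make the intended usage concrete, a caller could then end up doing
something like this (purely an illustrative sketch, not part of the patch;
src_sgt/dst_sgt, the half/half split and the attach->dev/dir context are
made up for the example):

	struct sg_table *src_sgt, *dst_sgt;
	u64 half = bo->tbo.base.size / 2;
	int r;

	/* Export two non-overlapping halves of a pinned VRAM BO as
	 * separate sg_tables, e.g. one for SRC and one for DST.
	 */
	r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0, half,
				      attach->dev, dir, &src_sgt);
	if (!r)
		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, half, half,
					      attach->dev, dir, &dst_sgt);
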
> Signed-off-by: Ramesh Errabolu <Ramesh.Errabolu at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c  |  6 ++--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h      |  1 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 34 ++++++++++++++------
>   3 files changed, 29 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index e0c4f7c7f1b9..57534b93bd91 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -255,6 +255,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
>   	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
>   	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
>   	struct sg_table *sgt;
> +	u64 num_bytes;
>   	long r;
>   
>   	if (!bo->tbo.pin_count) {
> @@ -291,8 +292,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
>   		break;
>   
>   	case TTM_PL_VRAM:
> -		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
> -					      dir, &sgt);
> +		num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;

Please rather use bo->tbo.base.size here. It is already in bytes.
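
I.e. something like this (untested, just to illustrate the suggestion; it
would also make the num_bytes local unnecessary):

		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
					      bo->tbo.base.size,
					      attach->dev, dir, &sgt);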

> +		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0, num_bytes,
> +					      attach->dev, dir, &sgt);
>   		if (r)
>   			return ERR_PTR(r);
>   		break;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> index dec0db8b0b13..9e38475e0f8d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
> @@ -112,6 +112,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
>   u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
>   int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
>   			      struct ttm_resource *mem,
> +			      u64 offset, u64 size,
>   			      struct device *dev,
>   			      enum dma_data_direction dir,
>   			      struct sg_table **sgt);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> index 592a2dd16493..c1a7772fa8e8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> @@ -25,6 +25,7 @@
>   #include <linux/dma-mapping.h>
>   #include "amdgpu.h"
>   #include "amdgpu_vm.h"
> +#include "amdgpu_res_cursor.h"
>   #include "amdgpu_atomfirmware.h"
>   #include "atom.h"
>   
> @@ -565,6 +566,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
>    *
>    * @adev: amdgpu device pointer
>    * @mem: TTM memory object
> + * @offset: byte offset from the base of VRAM BO
> + * @length: number of bytes to export in sg_table
>    * @dev: the other device
>    * @dir: dma direction
>    * @sgt: resulting sg table
> @@ -573,39 +576,47 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
>    */
>   int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
>   			      struct ttm_resource *mem,
> +			      u64 offset, u64 length,
>   			      struct device *dev,
>   			      enum dma_data_direction dir,
>   			      struct sg_table **sgt)
>   {
> -	struct drm_mm_node *node;
> +	struct amdgpu_res_cursor cursor;
>   	struct scatterlist *sg;
>   	int num_entries = 0;
> -	unsigned int pages;
>   	int i, r;
>   
>   	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
>   	if (!*sgt)
>   		return -ENOMEM;
>   
> -	for (pages = mem->num_pages, node = mem->mm_node;
> -	     pages; pages -= node->size, ++node)
> -		++num_entries;
> +	/* Determine the number of DRM_MM nodes to export */
> +	amdgpu_res_first(mem, offset, length, &cursor);
> +	while (cursor.remaining) {
> +		num_entries++;
> +		amdgpu_res_next(&cursor, cursor.size);
> +	}
>   
>   	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
>   	if (r)
>   		goto error_free;
>   
> +	/* Initialize scatterlist nodes of sg_table */
>   	for_each_sgtable_sg((*sgt), sg, i)
>   		sg->length = 0;
>   
> -	node = mem->mm_node;
> +	/*
> +	 * Walk down the DRM_MM nodes to populate the scatterlist nodes.
> +	 * @note: Use the iterator API to get the first DRM_MM node and
> +	 * the number of bytes covered by it, then step to the following
> +	 * DRM_MM node(s) if more of the buffer needs to be exported.
> +	 */
> +	amdgpu_res_first(mem, offset, length, &cursor);
>   	for_each_sgtable_sg((*sgt), sg, i) {
> -		phys_addr_t phys = (node->start << PAGE_SHIFT) +
> -			adev->gmc.aper_base;
> -		size_t size = node->size << PAGE_SHIFT;
>   		dma_addr_t addr;
> +		size_t size = cursor.size;
> +		phys_addr_t phys = cursor.start + adev->gmc.aper_base;

Please reorder the declarations in reverse xmas tree order.
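
I.e. longest line first, something like:

		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
		size_t size = cursor.size;
		dma_addr_t addr;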

Apart from those nitpicks the patch looks good to me.

Thanks,
Christian.

>   
> -		++node;
>   		addr = dma_map_resource(dev, phys, size, dir,
>   					DMA_ATTR_SKIP_CPU_SYNC);
>   		r = dma_mapping_error(dev, addr);
> @@ -615,7 +626,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
>   		sg_set_page(sg, NULL, size, 0);
>   		sg_dma_address(sg) = addr;
>   		sg_dma_len(sg) = size;
> +
> +		amdgpu_res_next(&cursor, cursor.size);
>   	}
> +
>   	return 0;
>   
>   error_unmap:


