[PATCH] drm/xe: Perform dma_map when moving system buffer objects to TT

Matthew Brost matthew.brost at intel.com
Tue Apr 30 16:47:31 UTC 2024


On Tue, Apr 30, 2024 at 02:02:14PM +0200, Thomas Hellström wrote:
> Currently we dma_map on ttm_tt population and dma_unmap when
> the pages are released in ttm_tt unpopulate.
> 
> Strictly, the dma_map is not needed until the bo is moved to the
> XE_PL_TT placement, so perform the dma_mapping on such moves
> instead, and remove the dma_mapping when moving to XE_PL_SYSTEM.
> 
> This is desired for the upcoming shrinker series where shrinking
> of a ttm_tt might fail. That would lead to an odd construct where
> we first dma_unmap, then shrink, and if shrinking fails, dma_map
> again. If the dma_mapping is instead performed on move like this,
> shrinking does not need to care at all about dma mapping.
> 
> Finally, where a ttm_tt is destroyed while bound to a different
> memory type than XE_PL_SYSTEM, we keep the dma_unmap in
> unpopulate().
>

Makes sense.
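
For anyone reading along, the map side that the move paths below end up
calling is roughly the following (a paraphrased sketch from memory, not
the verbatim xe source; it assumes xe_ttm_tt embeds a struct sg_table
sgt next to the sg pointer and dev fields used by xe_tt_unmap_sg() in
the first hunk below):

static int xe_tt_map_sg(struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	int ret;

	/* Nothing to do if the pages are already mapped. */
	if (xe_tt->sg)
		return 0;

	/* Build an sg table over the ttm_tt page array... */
	ret = sg_alloc_table_from_pages(&xe_tt->sgt, tt->pages,
					tt->num_pages, 0,
					(unsigned long)tt->num_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	xe_tt->sg = &xe_tt->sgt;

	/* ...and dma-map it so the device can reach the system pages. */
	ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
		return ret;
	}

	return 0;
}

With this series it pairs with xe_tt_unmap_sg() below: map on a move
into XE_PL_TT, unmap on a move back to XE_PL_SYSTEM.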
 
> Signed-off-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> ---
>  drivers/gpu/drm/xe/xe_bo.c | 45 ++++++++++++++++++++++++--------------
>  1 file changed, 29 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
> index bc1f794e3e61..4c1dd67a4588 100644
> --- a/drivers/gpu/drm/xe/xe_bo.c
> +++ b/drivers/gpu/drm/xe/xe_bo.c
> @@ -302,6 +302,18 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
>  	return 0;
>  }
>  
> +static void xe_tt_unmap_sg(struct ttm_tt *tt)
> +{
> +	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
> +
> +	if (xe_tt->sg) {
> +		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
> +				  DMA_BIDIRECTIONAL, 0);
> +		sg_free_table(xe_tt->sg);
> +		xe_tt->sg = NULL;
> +	}
> +}
> +
>  struct sg_table *xe_bo_sg(struct xe_bo *bo)
>  {
>  	struct ttm_tt *tt = bo->ttm.ttm;
> @@ -377,27 +389,15 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
>  	if (err)
>  		return err;
>  
> -	/* A follow up may move this xe_bo_move when BO is moved to XE_PL_TT */
> -	err = xe_tt_map_sg(tt);
> -	if (err)
> -		ttm_pool_free(&ttm_dev->pool, tt);
> -
>  	return err;
>  }
>  
>  static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
>  {
> -	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
> -
>  	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
>  		return;
>  
> -	if (xe_tt->sg) {
> -		dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
> -				  DMA_BIDIRECTIONAL, 0);
> -		sg_free_table(xe_tt->sg);
> -		xe_tt->sg = NULL;
> -	}
> +	xe_tt_unmap_sg(tt);
>  
>  	return ttm_pool_free(&ttm_dev->pool, tt);
>  }
> @@ -628,10 +628,14 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
>  	bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
>  				  ttm && ttm_tt_is_populated(ttm)) ? true : false;
>  	int ret = 0;
> +
>  	/* Bo creation path, moving to system or TT. */
>  	if ((!old_mem && ttm) && !handle_system_ccs) {
> -		ttm_bo_move_null(ttm_bo, new_mem);
> -		return 0;
> +		if (new_mem->mem_type == XE_PL_TT)
> +			ret = xe_tt_map_sg(ttm);
> +		if (!ret)
> +			ttm_bo_move_null(ttm_bo, new_mem);

Random rant: ttm_bo_move_null is a terrible name. It frees the old
memory and assigns a new one.
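
From memory it is little more than this (paraphrased, not the exact
TTM source):

void ttm_bo_move_null(struct ttm_buffer_object *bo,
		      struct ttm_resource *new_mem)
{
	/* Drop whatever resource the bo currently holds... */
	ttm_resource_free(bo, &bo->resource);
	/* ...and hand the bo the new resource. */
	ttm_bo_assign_mem(bo, new_mem);
}

Something like ttm_bo_take_resource() (made-up name) would describe
what it actually does much better.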

> +		goto out;
>  	}
>  
>  	if (ttm_bo->type == ttm_bo_type_sg) {
> @@ -650,6 +654,12 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
>  	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
>  		(!ttm && ttm_bo->type == ttm_bo_type_device);
>  
> +	if (new_mem->mem_type == XE_PL_TT) {
> +		ret = xe_tt_map_sg(ttm);
> +		if (ret)
> +			goto out;
> +	}
> +
>  	if ((move_lacks_source && !needs_clear)) {
>  		ttm_bo_move_null(ttm_bo, new_mem);
>  		goto out;
> @@ -786,8 +796,11 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
>  	xe_pm_runtime_put(xe);
>  
>  out:
> -	return ret;
> +	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
> +	    ttm_bo->ttm)

So this covers the case where we have moved to system and still have
pages.

What about the case where the eviction fails after the 2nd call to
xe_tt_map_sg() in this function? I'm guessing xe_ttm_tt_unpopulate()
covers that case?

Matt

> +		xe_tt_unmap_sg(ttm_bo->ttm);
>  
> +	return ret;
>  }
>  
>  /**
> -- 
> 2.44.0
> 

