[PATCH v7 06/10] drm/xe/xe2: Update chunk size for each iteration of ccs copy

Thomas Hellström <thomas.hellstrom at linux.intel.com>
Tue Dec 12 12:27:31 UTC 2023


On 12/11/23 14:43, Himal Prasad Ghimiray wrote:
> On the xe2 platform, XY_CTRL_SURF_COPY_BLT can handle a CCS copy for
> at most 1024 main surface pages.
>
> v2:
>   - Use better logic to determine chunk size (Matt/Thomas)
>
> Cc: Matt Roper <matthew.d.roper at intel.com>
> Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
> ---
>   drivers/gpu/drm/xe/xe_migrate.c | 33 ++++++++++++++++++++++-----------
>   1 file changed, 22 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> index 1016e2591737..9698986eab06 100644
> --- a/drivers/gpu/drm/xe/xe_migrate.c
> +++ b/drivers/gpu/drm/xe/xe_migrate.c
> @@ -65,9 +65,15 @@ struct xe_migrate {
>   };
>   
>   #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
> +#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
> +
> +#define MAX_MEM_TRANSFER_PER_PASS(_xe) ((!IS_DGFX(_xe) && GRAPHICS_VER(_xe) >= 20 && \
> +					 xe_device_has_flat_ccs(_xe)) ? \
> +					 MAX_CCS_LIMITED_TRANSFER : MAX_PREEMPTDISABLE_TRANSFER)
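(For reference, the SZ_4M here lines up with the commit message: 1024
main surface pages * XE_PAGE_SIZE (4K) = 4M, which implies
FIELD_MAX(XE2_CCS_SIZE_MASK) == 1023.)
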
Nit: perhaps open-code this instead of using a macro:

max_mem_transfer_per_pass = ...

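For illustration, an open-coded helper could look roughly like this
(an untested sketch only; the condition just mirrors the macro):

static u64 max_mem_transfer_per_pass(struct xe_device *xe)
{
        /*
         * Xe2+ igpus with flat CCS are limited by what a single
         * XY_CTRL_SURF_COPY_BLT can cover.
         */
        if (!IS_DGFX(xe) && GRAPHICS_VER(xe) >= 20 &&
            xe_device_has_flat_ccs(xe))
                return MAX_CCS_LIMITED_TRANSFER;

        return MAX_PREEMPTDISABLE_TRANSFER;
}

xe_migrate_res_sizes() and NUM_PT_PER_BLIT() could then call the helper
rather than expanding the macro.
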
Either way:

Reviewed-by: Thomas Hellström <thomas.hellstrom at linux.intel.com>


>   #define NUM_KERNEL_PDE 17
>   #define NUM_PT_SLOTS 32
> -#define NUM_PT_PER_BLIT (MAX_PREEMPTDISABLE_TRANSFER / SZ_2M)
> +#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
> +#define NUM_PT_PER_BLIT(_xe) (MAX_MEM_TRANSFER_PER_PASS(_xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE)
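
(With this, NUM_PT_PER_BLIT(_xe) works out to SZ_4M / SZ_2M = 2 level-0
page-table slots per blit on CCS-limited xe2 igpus, and SZ_8M / SZ_2M = 4
everywhere else, i.e. unchanged from before.)
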
>   
>   /**
>    * xe_tile_migrate_engine() - Get this tile's migrate engine.
> @@ -366,14 +372,14 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
>   	return m;
>   }
>   
> -static u64 xe_migrate_res_sizes(struct xe_res_cursor *cur)
> +static u64 xe_migrate_res_sizes(struct xe_device *xe, struct xe_res_cursor *cur)
>   {
>   	/*
>   	 * For VRAM we use identity mapped pages so we are limited to current
>   	 * cursor size. For system we program the pages ourselves so we have no
>   	 * such limitation.
>   	 */
> -	return min_t(u64, MAX_PREEMPTDISABLE_TRANSFER,
> +	return min_t(u64, MAX_MEM_TRANSFER_PER_PASS(xe),
>   		     mem_type_is_vram(cur->mem_type) ? cur->size :
>   		     cur->remaining);
>   }
> @@ -672,10 +678,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
>   		u32 update_idx;
>   		u64 ccs_ofs, ccs_size;
>   		u32 ccs_pt;
> +
>   		bool usm = xe->info.has_usm;
> +		u32 avail_pts = NUM_PT_PER_BLIT(xe);
>   
> -		src_L0 = xe_migrate_res_sizes(&src_it);
> -		dst_L0 = xe_migrate_res_sizes(&dst_it);
> +		src_L0 = xe_migrate_res_sizes(xe, &src_it);
> +		dst_L0 = xe_migrate_res_sizes(xe, &dst_it);
>   
>   		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
>   			pass++, src_L0, dst_L0);
> @@ -684,18 +692,18 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
>   
>   		batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
>   					      &src_L0_ofs, &src_L0_pt, 0, 0,
> -					      NUM_PT_PER_BLIT);
> +					      avail_pts);
>   
>   		batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
>   					      &dst_L0_ofs, &dst_L0_pt, 0,
> -					      NUM_PT_PER_BLIT, NUM_PT_PER_BLIT);
> +					      avail_pts, avail_pts);
>   
>   		if (copy_system_ccs) {
>   			ccs_size = xe_device_ccs_bytes(xe, src_L0);
>   			batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
>   						      &ccs_ofs, &ccs_pt, 0,
> -						      2 * NUM_PT_PER_BLIT,
> -						      NUM_PT_PER_BLIT);
> +						      2 * avail_pts,
> +						      avail_pts);
>   		}
>   
>   		/* Add copy commands size here */
> @@ -922,9 +930,12 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
>   		struct xe_sched_job *job;
>   		struct xe_bb *bb;
>   		u32 batch_size, update_idx;
> +
>   		bool usm = xe->info.has_usm;
> +		u32 avail_pts = NUM_PT_PER_BLIT(xe);
> +
> +		clear_L0 = xe_migrate_res_sizes(xe, &src_it);
>   
> -		clear_L0 = xe_migrate_res_sizes(&src_it);
>   		drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
>   
>   		/* Calculate final sizes and batch size.. */
> @@ -932,7 +943,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
>   			pte_update_size(m, clear_vram, src, &src_it,
>   					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
>   					emit_clear_cmd_len(gt), 0,
> -					NUM_PT_PER_BLIT);
> +					avail_pts);
>   		if (xe_device_has_flat_ccs(xe) && clear_vram)
>   			batch_size += EMIT_COPY_CCS_DW;
>   

