[Intel-xe] [PATCH v4 5/9] drm/xe/xe2: Update chunk size for each iteration of ccs copy

Himal Prasad Ghimiray himal.prasad.ghimiray at intel.com
Wed Dec 6 04:31:22 UTC 2023


On the Xe2 platform, XY_CTRL_SURF_COPY_BLT can handle a CCS copy for
at most 1024 main surface pages. Limit the size processed per
iteration to SZ_4M accordingly.

Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray at intel.com>
---
 drivers/gpu/drm/xe/xe_migrate.c | 34 ++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)
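
Editor's note, not part of the patch: a minimal standalone sketch of the
arithmetic behind the new limit. 1024 main surface pages of 4 KiB each come
to exactly SZ_4M, and assuming one page table covers 2 MiB worth of 4 KiB
PTEs (as the SZ_4M / SZ_2M expression in the diff suggests), a 4 MiB chunk
needs two page tables per blit. The SZ_* defines mirror the kernel values;
everything else below is illustrative only.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: mirror the kernel's SZ_* constants. */
    #define SZ_4K 0x00001000ULL
    #define SZ_2M 0x00200000ULL
    #define SZ_4M 0x00400000ULL

    int main(void)
    {
            /* XY_CTRL_SURF_COPY_BLT limit on Xe2 integrated parts. */
            uint64_t max_main_pages = 1024;

            /* 1024 * 4K == 4M, hence the SZ_4M cap per iteration. */
            uint64_t chunk = max_main_pages * SZ_4K;

            /* One page table maps 2M of 4K PTEs, so a 4M chunk needs
             * SZ_4M / SZ_2M == 2 page tables per blit (avail_pts). */
            uint64_t avail_pts = SZ_4M / SZ_2M;

            printf("chunk = %llu bytes (SZ_4M = %llu)\n",
                   (unsigned long long)chunk, (unsigned long long)SZ_4M);
            printf("avail_pts = %llu\n", (unsigned long long)avail_pts);
            return 0;
    }

Compiled and run, this just confirms the two numbers the patch hard-codes.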

diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index b4dd1b6d78f0..98dca906a023 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -672,11 +672,24 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 		u32 update_idx;
 		u64 ccs_ofs, ccs_size;
 		u32 ccs_pt;
+
 		bool usm = xe->info.supports_usm;
+		u32 avail_pts = NUM_PT_PER_BLIT;
 
 		src_L0 = xe_migrate_res_sizes(&src_it);
 		dst_L0 = xe_migrate_res_sizes(&dst_it);
 
+		/* On IGFX, XY_CTRL_SURF_COPY_BLT can handle at most 1024
+		 * main surface pages, so limit the size processed per
+		 * iteration to SZ_4M.
+		 */
+		if (!IS_DGFX(xe) && GRAPHICS_VER(xe) >= 20) {
+			src_L0 = min_t(u64, src_L0, SZ_4M);
+			dst_L0 = min_t(u64, dst_L0, SZ_4M);
+
+			avail_pts = SZ_4M / SZ_2M;
+		}
+
 		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
 			pass++, src_L0, dst_L0);
 
@@ -684,18 +697,18 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 
 		batch_size += pte_update_size(m, src_is_vram, src, &src_it, &src_L0,
 					      &src_L0_ofs, &src_L0_pt, 0, 0,
-					      NUM_PT_PER_BLIT);
+					      avail_pts);
 
 		batch_size += pte_update_size(m, dst_is_vram, dst, &dst_it, &src_L0,
 					      &dst_L0_ofs, &dst_L0_pt, 0,
-					      NUM_PT_PER_BLIT, NUM_PT_PER_BLIT);
+					      avail_pts, avail_pts);
 
 		if (copy_system_ccs) {
 			ccs_size = xe_device_ccs_bytes(xe, src_L0);
 			batch_size += pte_update_size(m, false, NULL, &ccs_it, &ccs_size,
 						      &ccs_ofs, &ccs_pt, 0,
-						      2 * NUM_PT_PER_BLIT,
-						      NUM_PT_PER_BLIT);
+						      2 * avail_pts,
+						      avail_pts);
 		}
 
 		/* Add copy commands size here */
@@ -923,8 +936,19 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 		struct xe_bb *bb;
 		u32 batch_size, update_idx;
 		bool usm = xe->info.supports_usm;
+		u32 avail_pts = NUM_PT_PER_BLIT;
 
 		clear_L0 = xe_migrate_res_sizes(&src_it);
+
+		/* On IGFX, XY_CTRL_SURF_COPY_BLT can handle at most 1024
+		 * main surface pages, so limit the size processed per
+		 * iteration to SZ_4M.
+		 */
+		if (!IS_DGFX(xe) && GRAPHICS_VER(xe) >= 20) {
+			clear_L0 = min_t(u64, clear_L0, SZ_4M);
+			avail_pts = SZ_4M / SZ_2M;
+		}
+
 		drm_dbg(&xe->drm, "Pass %u, size: %llu\n", pass++, clear_L0);
 
 		/* Calculate final sizes and batch size.. */
@@ -932,7 +956,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 			pte_update_size(m, clear_vram, src, &src_it,
 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
 					emit_clear_cmd_len(gt), 0,
-					NUM_PT_PER_BLIT);
+					avail_pts);
 		if (xe_device_has_flat_ccs(xe) && clear_vram)
 			batch_size += EMIT_COPY_CCS_DW;
 
-- 
2.25.1


