[Intel-xe] [PATCH v2] drm/xe: Infer service copy functionality from engine list

Matt Roper matthew.d.roper at intel.com
Wed Sep 27 20:51:44 UTC 2023


On platforms with multiple BCS engines (i.e., PVC and Xe2), not all BCS
engines are created equal.  The BCS0 engine is what the specs refer to
as a "resource copy engine," which supports the platform's full set of
copy/fill instructions.  In contrast, the non-BCS0 engines are more
streamlined and only support a subset of the GPU instructions supported
by the resource copy engine.  Platforms with both types of copy engines
always support the MEM_COPY and MEM_SET instructions, which can be used
for simple copy and fill operations on either type of BCS engine.  Since
the simple MEM_SET instruction meets the needs of Xe's migrate code (and
since the more elaborate XY_FAST_COLOR_BLT instruction isn't available
to use on service copy engines), we always prefer to use MEM_SET for
clearing buffers on our newer platforms.

We've been using a 'has_link_copy_engine' feature flag to keep track of
which platforms should use MEM_SET for fills.  However, a feature flag
like this is unnecessary since we can already derive the presence of
service copy engines (and in turn the MEM_SET instruction) just by
looking at the platform's pre-fusing engine list.  Utilizing the engine
list for this also avoids mistakes like the one we made on Xe2, where we
forgot to set the feature flag in the IP definition.

For clarity, "service copy" is a general term that covers any blitter
engine that supports a limited subset of the overall blitter instruction
set (in practice this is any non-BCS0 blitter engine).  The "link copy
engines" introduced on PVC and the "paging copy engine" present in Xe2
are both instances of service copy engines.
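
To make the dependency concrete: the check added below just tests the
BCS1-BCS8 bits of the GT's pre-fusing engine mask.  A minimal sketch of
the idea, using the same field and enum names as the diff (the helper
name here is illustrative only):

	/*
	 * Look at the pre-fusing mask (__engine_mask) rather than the
	 * runtime mask: MEM_SET/MEM_COPY remain usable on BCS0 even if
	 * every service copy engine (BCS1-BCS8) has been fused off.
	 */
	static bool gt_designed_with_service_copy(struct xe_gt *gt)
	{
		return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
							XE_HW_ENGINE_BCS1);
	}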

v2:
 - Rewrite / expand the commit message.  (Bala)
 - Fix checkpatch whitespace error.

Bspec: 65019
Cc: Lucas De Marchi <lucas.demarchi at intel.com>
Cc: Balasubramani Vivekanandan <balasubramani.vivekanandan at intel.com>
Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
Reviewed-by: Haridhar Kalvala <haridhar.kalvala at intel.com>
---
 drivers/gpu/drm/xe/xe_device_types.h |  2 --
 drivers/gpu/drm/xe/xe_migrate.c      | 34 +++++++++++++++++-----------
 drivers/gpu/drm/xe/xe_pci.c          |  2 --
 drivers/gpu/drm/xe/xe_pci_types.h    |  1 -
 4 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 32ab0fea04ee..2d20bcbe8d9b 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -234,8 +234,6 @@ struct xe_device {
 		u8 has_llc:1;
 		/** @has_range_tlb_invalidation: Has range based TLB invalidations */
 		u8 has_range_tlb_invalidation:1;
-		/** @has_link_copy_engines: Whether the platform has link copy engines */
-		u8 has_link_copy_engine:1;
 		/** @enable_display: display enabled */
 		u8 enable_display:1;
 
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 9438f609d18b..3cf7412c79aa 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -854,28 +854,36 @@ static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
 	bb->len += len;
 }
 
-static u32 emit_clear_cmd_len(struct xe_device *xe)
+static bool has_service_copy_support(struct xe_gt *gt)
 {
-	if (xe->info.has_link_copy_engine)
+	/*
+	 * What we care about is whether the architecture was designed with
+	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
+	 * instructions) so check the architectural engine list rather than the
+	 * actual list since these instructions are usable on BCS0 even if
+	 * all of the actual service copy engines (BCS1-BCS8) have been fused
+	 * off.
+	 */
+	return gt->info.__engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
+						XE_HW_ENGINE_BCS1);
+}
+
+static u32 emit_clear_cmd_len(struct xe_gt *gt)
+{
+	if (has_service_copy_support(gt))
 		return PVC_MEM_SET_CMD_LEN_DW;
 	else
 		return XY_FAST_COLOR_BLT_DW;
 }
 
-static int emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
-		      u32 size, u32 pitch, bool is_vram)
+static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
+		       u32 size, u32 pitch, bool is_vram)
 {
-	struct xe_device *xe = gt_to_xe(gt);
-
-	if (xe->info.has_link_copy_engine) {
+	if (has_service_copy_support(gt))
 		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
-
-	} else {
+	else
 		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
 				     is_vram);
-	}
-
-	return 0;
 }
 
 /**
@@ -928,7 +936,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 		batch_size = 2 +
 			pte_update_size(m, clear_vram, src, &src_it,
 					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
-					emit_clear_cmd_len(xe), 0,
+					emit_clear_cmd_len(gt), 0,
 					NUM_PT_PER_BLIT);
 		if (xe_device_has_flat_ccs(xe) && clear_vram)
 			batch_size += EMIT_COPY_CCS_DW;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index a11163b89a3f..0da752b3c9cb 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -138,7 +138,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
 
 	.has_asid = 1,
 	.has_flat_ccs = 0,
-	.has_link_copy_engine = 1,
 	.supports_usm = 1,
 };
 
@@ -574,7 +573,6 @@ static int xe_info_init(struct xe_device *xe,
 	xe->info.has_asid = graphics_desc->has_asid;
 	xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
 	xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
-	xe->info.has_link_copy_engine = graphics_desc->has_link_copy_engine;
 
 	xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
 				  enable_display &&
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index df6ddbc52d7f..bd0b0d87413e 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -24,7 +24,6 @@ struct xe_graphics_desc {
 
 	u8 has_asid:1;
 	u8 has_flat_ccs:1;
-	u8 has_link_copy_engine:1;
 	u8 has_range_tlb_invalidation:1;
 	u8 supports_usm:1;
 };
-- 
2.41.0


