[PATCH 11/16] drm/amd/display: Add new sequence for 4-lane HBR3 on vendor specific retimers

Alex Hung <alex.hung@amd.com>
Wed Jul 19 18:28:01 UTC 2023


From: Ovidiu Bunea <ovidiu.bunea@amd.com>

[Why]
In some vendor specific retimer setups with a downstream 4-lane HBR3
configuration, the sink shows severe corruption (horizontal shifting)
and intermittent blanking.

[How]
Add a new retimer programming sequence, issued before clock recovery,
when 4 lanes are active.
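
The core of the change is five vendor specific DPCD writes to the retimer,
gated on all four lanes being active and issued before the clock recovery
sequence starts. A condensed, illustrative sketch of that logic (reusing the
core_link_write_dpcd() helper, the 0xF004F vendor write address and the
LANE_COUNT_FOUR check already present in the driver; the patch itself unrolls
the five writes explicitly rather than looping over a table):

	/* Illustrative only - not the literal hunks below. */
	static const uint8_t vendor_lttpr_write_data_4lane[5][4] = {
		{0x1, 0x6E, 0xF2, 0x19},
		{0x1, 0x6B, 0xF2, 0x01},
		{0x1, 0x6D, 0xF2, 0x18},
		{0x1, 0x6C, 0xF2, 0x03},
		{0x1, 0x03, 0xF3, 0x06},
	};

	if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
		int i;

		/* Program the retimer before starting clock recovery. */
		for (i = 0; i < 5; i++)
			core_link_write_dpcd(
					link,
					vendor_lttpr_write_address, /* 0xF004F */
					vendor_lttpr_write_data_4lane[i],
					sizeof(vendor_lttpr_write_data_4lane[i]));
	}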

Reviewed-by: Michael Strauss <michael.strauss@amd.com>
Acked-by: Alex Hung <alex.hung@amd.com>
Signed-off-by: Ovidiu Bunea <ovidiu.bunea@amd.com>
---
 .../link_dp_training_fixed_vs_pe_retimer.c    | 67 ++++++++++++++++++-
 1 file changed, 66 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 2a4f70acc7fa..ca0543e62917 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -236,6 +236,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
 	uint32_t pre_disable_intercept_delay_ms = 0;
 	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
 	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+	const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+	const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+	const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+	const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+	const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
 	uint32_t vendor_lttpr_write_address = 0xF004F;
 	enum link_training_result status = LINK_TRAINING_SUCCESS;
 	uint8_t lane = 0;
@@ -338,6 +343,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
 		DP_DOWNSPREAD_CTRL,
 		lt_settings->link_settings.link_spread);
 
+	if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_1[0],
+				sizeof(vendor_lttpr_write_data_4lane_1));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_2[0],
+				sizeof(vendor_lttpr_write_data_4lane_2));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_3[0],
+				sizeof(vendor_lttpr_write_data_4lane_3));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_4[0],
+				sizeof(vendor_lttpr_write_data_4lane_4));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_5[0],
+				sizeof(vendor_lttpr_write_data_4lane_5));
+	}
+
 	/* 2. Perform link training */
 
 	/* Perform Clock Recovery Sequence */
@@ -598,7 +631,11 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
 	uint32_t pre_disable_intercept_delay_ms = 0;
 	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
 	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
-
+	const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
+	const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
+	const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
+	const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
+	const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
 	uint32_t vendor_lttpr_write_address = 0xF004F;
 	enum link_training_result status = LINK_TRAINING_SUCCESS;
 	uint8_t lane = 0;
@@ -701,6 +738,34 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
 		DP_DOWNSPREAD_CTRL,
 		lt_settings->link_settings.link_spread);
 
+	if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_1[0],
+				sizeof(vendor_lttpr_write_data_4lane_1));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_2[0],
+				sizeof(vendor_lttpr_write_data_4lane_2));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_3[0],
+				sizeof(vendor_lttpr_write_data_4lane_3));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_4[0],
+				sizeof(vendor_lttpr_write_data_4lane_4));
+		core_link_write_dpcd(
+				link,
+				vendor_lttpr_write_address,
+				&vendor_lttpr_write_data_4lane_5[0],
+				sizeof(vendor_lttpr_write_data_4lane_5));
+	}
+
 	/* 2. Perform link training */
 
 	/* Perform Clock Recovery Sequence */
-- 
2.41.0


