[PATCH 01/18] drm/amd/display: Extend w/a for hard hang on HPD to dcn20

Rodrigo Siqueira Rodrigo.Siqueira at amd.com
Fri Sep 17 17:46:59 UTC 2021


From: Qingqing Zhuo <qingqing.zhuo at amd.com>

[Why]
HPD disable and enable sequences are not mutually exclusive on Linux.
For HPDs that span over 1s (i.e. HPD low = 1s), part of the disable
sequence (specifically, a request to SMU to lower refclk) could come
right before the call to PHY enable, causing DMUB to access an
unresponsive PHY and thus a hard hang on the system.

[How]
Do not allow the 48 MHz refclk to be turned off while any HPD is in
the connected state on dcn20.
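
For reviewers, here is a minimal standalone sketch of the decision this
patch adds. It is illustrative only: read_hpd_sense() is a hypothetical
stand-in for dal_get_hpd_state_dcn20(), and the simplified types replace
the real DC structures; the actual change is in the diff below.

/* Illustrative sketch only -- helper and types are simplified
 * stand-ins, not the real DC implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum dc_irq_source {
	DC_IRQ_SOURCE_HPD1,
	DC_IRQ_SOURCE_HPD2,
	DC_IRQ_SOURCE_HPD3,
	DC_IRQ_SOURCE_HPD4,
	DC_IRQ_SOURCE_HPD5,
	DC_IRQ_SOURCE_HPD6,
};

/* Stand-in for dal_get_hpd_state_dcn20(): nonzero when the HPD pad
 * for @source senses a connected sink. */
static uint32_t read_hpd_sense(enum dc_irq_source source)
{
	return source == DC_IRQ_SOURCE_HPD3; /* pretend HPD3 has a sink */
}

int main(void)
{
	int display_count = 0; /* no active streams */
	uint32_t hpd_state = 0;
	int irq_src;
	bool enter_display_off;

	/* One connected pad is enough to veto display-off. */
	for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
		hpd_state = read_hpd_sense((enum dc_irq_source)irq_src);
		if (hpd_state)
			break;
	}

	/* Power down (letting SMU gate the 48 MHz refclk) only when no
	 * display is driven AND nothing is physically connected. */
	enter_display_off = display_count == 0 && !hpd_state;
	printf("enter_display_off = %s\n", enter_display_off ? "true" : "false");
	return 0;
}

Without the loop (the pre-patch behavior), display_count == 0 alone
would allow the refclk-off request even with a sink still attached,
which is exactly the window described in [Why].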

Reviewed-by: Hersen Wu <hersenxs.wu at amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira at amd.com>
Signed-off-by: Qingqing Zhuo <qingqing.zhuo at amd.com>
---
 .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c  | 12 ++++++++-
 .../display/dc/irq/dcn20/irq_service_dcn20.c  | 25 +++++++++++++++++++
 .../display/dc/irq/dcn20/irq_service_dcn20.h  |  2 ++
 3 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d01aa9f15a6..315466f5aade 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -38,6 +38,8 @@
 #include "clk/clk_11_0_0_offset.h"
 #include "clk/clk_11_0_0_sh_mask.h"
 
+#include "irq/dcn20/irq_service_dcn20.h"
+
 #undef FN
 #define FN(reg_name, field_name) \
 	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
@@ -221,6 +223,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	bool force_reset = false;
 	bool p_state_change_support;
 	int total_plane_count;
+	int irq_src;
+	uint32_t hpd_state;
 
 	if (dc->work_arounds.skip_clock_update)
 		return;
@@ -238,7 +242,13 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (dc->res_pool->pp_smu)
 		pp_smu = &dc->res_pool->pp_smu->nv_funcs;
 
-	if (display_count == 0)
+	for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
+		hpd_state = dal_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
+		if (hpd_state)
+			break;
+	}
+
+	if (display_count == 0 && !hpd_state)
 		enter_display_off = true;
 
 	if (enter_display_off == safe_to_lower) {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index c4b067d01895..49d87fe5c167 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -132,6 +132,31 @@ enum dc_irq_source to_dal_irq_source_dcn20(
 	}
 }
 
+uint32_t dal_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source)
+{
+	const struct irq_source_info *info;
+	uint32_t addr;
+	uint32_t value;
+	uint32_t current_status;
+
+	info = find_irq_source_info(irq_service, source);
+	if (!info)
+		return 0;
+
+	addr = info->status_reg;
+	if (!addr)
+		return 0;
+
+	value = dm_read_reg(irq_service->ctx, addr);
+	current_status =
+		get_reg_field_value(
+			value,
+			HPD0_DC_HPD_INT_STATUS,
+			DC_HPD_SENSE);
+
+	return current_status;
+}
+
 static bool hpd_ack(
 	struct irq_service *irq_service,
 	const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
index aee4b37999f1..f60a203e7188 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
@@ -31,4 +31,6 @@
 struct irq_service *dal_irq_service_dcn20_create(
 	struct irq_service_init_data *init_data);
 
+uint32_t dal_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source);
+
 #endif
-- 
2.25.1


