[PATCH 15/17] drm/amd/display: Fix VUpdate offset calculations for dcn401
Fangzhi Zuo
Jerry.Zuo at amd.com
Wed Mar 26 19:49:12 UTC 2025
From: Dillon Varone <dillon.varone at amd.com>
[WHY&HOW]
DCN401 stores the VStartup offset used to calculate the VUpdate
position in a different structure than previous DCN generations, so
adjust the calculation to read the offset from
global_sync.dcn4x.vstartup_lines instead.
Reviewed-by: Aric Cyr <aric.cyr at amd.com>
Signed-off-by: Dillon Varone <dillon.varone at amd.com>
Signed-off-by: Fangzhi Zuo <jerry.zuo at amd.com>
---
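As a quick sanity check of the formula, take illustrative progressive
timings (made-up numbers, not from real hardware): v_total = 1125,
v_addressable = 1080, v_front_porch = 4, both borders 0, and
vstartup_lines = 35:

	vesa_sync_start = 1080 + 0 + 4          = 1084
	asic_blank_end  = (1125 - 1084 - 0) * 1 = 41
	offset          = 41 - 35 + 1           = 7

so the returned VUpdate offset from VSync is 7 lines for these timings.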
.../amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 44 +++++++++++++++++++
.../amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 1 +
.../amd/display/dc/hwss/dcn401/dcn401_init.c | 2 +-
3 files changed, 46 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 5489f3d431f6..79f4eaf8fc52 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2646,3 +2646,47 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
+
+/*
+ * apply_front_porch_workaround
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep front porch at minimum 2 for interlaced mode or 1 for progressive.
+ */
+static void apply_front_porch_workaround(
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+int dcn401_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
+ struct dc_crtc_timing patched_crtc_timing;
+ int vesa_sync_start;
+ int asic_blank_end;
+ int interlace_factor;
+
+ patched_crtc_timing = *dc_crtc_timing;
+ apply_front_porch_workaround(&patched_crtc_timing);
+
+ interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
+
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+ patched_crtc_timing.v_border_top)
+ * interlace_factor;
+
+ return asic_blank_end -
+ pipe_ctx->global_sync.dcn4x.vstartup_lines + 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 781cf0efccc6..37c915568afc 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,4 +109,5 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+int dcn401_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index fe7aceb2f510..aa9573ce44fc 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -73,7 +73,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.init_sys_ctx = dcn20_init_sys_ctx,
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
- .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .get_vupdate_offset_from_vsync = dcn401_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn401_apply_idle_power_optimizations,
.does_plane_fit_in_mall = NULL,
--
2.43.0