[PATCH 13/16] Revert "drm/amd/display: Fix VUpdate offset calculations for dcn401"
Roman.Li at amd.com
Wed Apr 2 16:13:17 UTC 2025
From: Dillon Varone <Dillon.Varone at amd.com>
This reverts commit 3ad2d94de0c72e31105cb8e607e7ca19f579bf4b.
Reason for revert: it causes stuttering in some use cases.
Reviewed-by: Aric Cyr <aric.cyr at amd.com>
Signed-off-by: Dillon Varone <Dillon.Varone at amd.com>
Signed-off-by: Roman Li <roman.li at amd.com>
---
.../amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 44 -------------------
.../amd/display/dc/hwss/dcn401/dcn401_hwseq.h | 1 -
.../amd/display/dc/hwss/dcn401/dcn401_init.c | 2 +-
3 files changed, 1 insertion(+), 46 deletions(-)
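
Note for reviewers, not part of the commit message: the arithmetic being
dropped below is small enough to reason about by hand. Here is a standalone,
user-space sketch of the VUpdate-offset calculation the reverted helper
performed (front-porch clamp, VESA sync start, ASIC blank end, then
subtracting the VSTARTUP line count). The struct and function names are
invented for illustration and are not kernel code; treat it as a worked
example only, assuming the timing fields behave like their dc_crtc_timing
counterparts.

#include <stdio.h>

/* Illustrative stand-in for the vertical portion of dc_crtc_timing. */
struct sketch_timing {
	int v_total;
	int v_addressable;
	int v_border_top;
	int v_border_bottom;
	int v_front_porch;
	int interlace;		/* 1 = interlaced, 0 = progressive */
};

static int vupdate_offset_from_vsync(struct sketch_timing t, int vstartup_lines)
{
	int interlace_factor = t.interlace ? 2 : 1;
	int min_front_porch = t.interlace ? 2 : 1;
	int vesa_sync_start, asic_blank_end;

	/* Front porch workaround: clamp to the minimum the hardware tolerates. */
	if (t.v_front_porch < min_front_porch)
		t.v_front_porch = min_front_porch;

	vesa_sync_start = t.v_addressable + t.v_border_bottom + t.v_front_porch;
	asic_blank_end = (t.v_total - vesa_sync_start - t.v_border_top) * interlace_factor;

	/* Offset from vsync at which VUpdate should land. */
	return asic_blank_end - vstartup_lines + 1;
}

int main(void)
{
	/* 1080p-like vertical timing; values purely illustrative. */
	struct sketch_timing t = { 1125, 1080, 0, 0, 4, 0 };

	printf("vupdate offset = %d\n", vupdate_offset_from_vsync(t, 10));
	return 0;
}

With these numbers the sketch prints an offset of 32 lines for a VSTARTUP of
10, which is the kind of value the get_vupdate_offset_from_vsync callback
returns to the vupdate positioning code.
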
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 79f4eaf8fc52..5489f3d431f6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2646,47 +2646,3 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
-
-/*
- * apply_front_porch_workaround
- *
- * This is a workaround for a bug that has existed since R5xx and has not been
- * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
- */
-static void apply_front_porch_workaround(
- struct dc_crtc_timing *timing)
-{
- if (timing->flags.INTERLACE == 1) {
- if (timing->v_front_porch < 2)
- timing->v_front_porch = 2;
- } else {
- if (timing->v_front_porch < 1)
- timing->v_front_porch = 1;
- }
-}
-
-int dcn401_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
-{
- const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
- struct dc_crtc_timing patched_crtc_timing;
- int vesa_sync_start;
- int asic_blank_end;
- int interlace_factor;
-
- patched_crtc_timing = *dc_crtc_timing;
- apply_front_porch_workaround(&patched_crtc_timing);
-
- interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
-
- vesa_sync_start = patched_crtc_timing.v_addressable +
- patched_crtc_timing.v_border_bottom +
- patched_crtc_timing.v_front_porch;
-
- asic_blank_end = (patched_crtc_timing.v_total -
- vesa_sync_start -
- patched_crtc_timing.v_border_top)
- * interlace_factor;
-
- return asic_blank_end -
- pipe_ctx->global_sync.dcn4x.vstartup_lines + 1;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 37c915568afc..781cf0efccc6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -109,5 +109,4 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
-int dcn401_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index aa9573ce44fc..fe7aceb2f510 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -73,7 +73,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.init_sys_ctx = dcn20_init_sys_ctx,
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
- .get_vupdate_offset_from_vsync = dcn401_get_vupdate_offset_from_vsync,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn401_apply_idle_power_optimizations,
.does_plane_fit_in_mall = NULL,
--
2.34.1