[PATCH 3/9] drm/amd/display: Revert regression

Qingqing Zhuo <qingqing.zhuo@amd.com>
Wed Aug 5 17:40:52 UTC 2020


From: Alvin Lee <alvin.lee2@amd.com>

[Why]
The MPCC disconnect and wait-for-pending-cleared sequence previously
added to commit_planes_for_stream() caused a pipe split regression.

[How]
Revert it: drop the dcn20_disconnect_pipes() and
dcn20_wait_for_pending_cleared() helpers, along with their
hw_sequencer_funcs hooks on DCN20, DCN21 and DCN30.

Signed-off-by: Alvin Lee <alvin.lee2@amd.com>
Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
---
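Note for reviewers: the sequence being reverted is the block below, reproduced
(with added comments) from the commit_planes_for_stream() hunk in the diff. It
locked the pipes, disconnected unused MPCCs, and waited for pending pipe
updates to clear before the per-pipe programming loop:

	if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock &&
		dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared) {
		/* lock all pipes while unused MPCCs are disconnected */
		dc->hwss.interdependent_update_lock(dc, context, true);
		mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		/* wait for the disconnects to take effect before reprogramming */
		if (mpcc_disconnected)
			dc->hwss.wait_for_pending_cleared(dc, context);
	}
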
 drivers/gpu/drm/amd/display/dc/core/dc.c      |  10 --
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c    | 114 ------------------
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.h    |   7 --
 .../gpu/drm/amd/display/dc/dcn20/dcn20_init.c |   2 -
 .../gpu/drm/amd/display/dc/dcn21/dcn21_init.c |   2 -
 .../gpu/drm/amd/display/dc/dcn30/dcn30_init.c |   2 -
 .../gpu/drm/amd/display/dc/inc/hw_sequencer.h |   4 -
 7 files changed, 141 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index ebbb8182228d..5aa3b89471c3 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2300,7 +2300,6 @@ static void commit_planes_for_stream(struct dc *dc,
 		enum surface_update_type update_type,
 		struct dc_state *context)
 {
-	bool mpcc_disconnected = false;
 	int i, j;
 	struct pipe_ctx *top_pipe_to_program = NULL;
 
@@ -2331,15 +2330,6 @@ static void commit_planes_for_stream(struct dc *dc,
 		context_clock_trace(dc, context);
 	}
 
-	if (update_type != UPDATE_TYPE_FAST && dc->hwss.interdependent_update_lock &&
-		dc->hwss.disconnect_pipes && dc->hwss.wait_for_pending_cleared){
-		dc->hwss.interdependent_update_lock(dc, context, true);
-		mpcc_disconnected = dc->hwss.disconnect_pipes(dc, context);
-		dc->hwss.interdependent_update_lock(dc, context, false);
-		if (mpcc_disconnected)
-			dc->hwss.wait_for_pending_cleared(dc, context);
-	}
-
 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 73eb4e76a0b1..66180b4332f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1624,120 +1624,6 @@ static void dcn20_program_pipe(
 	}
 }
 
-bool dcn20_disconnect_pipes(
-		struct dc *dc,
-		struct dc_state *context)
-{
-		int i;
-		struct dce_hwseq *hws = dc->hwseq;
-		bool mpcc_disconnected = false;
-		DC_LOGGER_INIT(dc->ctx->logger);
-
-		/* Set pipe update flags and lock pipes */
-		for (i = 0; i < dc->res_pool->pipe_count; i++)
-			dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
-					&context->res_ctx.pipe_ctx[i]);
-
-		if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-			/* OTG blank before disabling all front ends */
-			for (i = 0; i < dc->res_pool->pipe_count; i++) {
-				if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
-					&& !context->res_ctx.pipe_ctx[i].top_pipe
-					&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
-					&& context->res_ctx.pipe_ctx[i].stream) {
-					hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-				}
-			}
-
-			/* Disconnect mpcc */
-			for (i = 0; i < dc->res_pool->pipe_count; i++) {
-				if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
-					hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
-					DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
-					mpcc_disconnected = true;
-				}
-			}
-		}
-
-		if (mpcc_disconnected) {
-			for (i = 0; i < dc->res_pool->pipe_count; i++) {
-				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-				struct dc_plane_state *plane_state = pipe_ctx->plane_state;
-				struct hubp *hubp = pipe_ctx->plane_res.hubp;
-
-				if (!pipe_ctx || !plane_state || !pipe_ctx->stream)
-					continue;
-
-			// Only update scaler and viewport here if we lose a pipe split.
-			// This is to prevent half the screen from being black when we
-			// unlock after disconnecting MPCC.
-			if (!(old_pipe && !pipe_ctx->top_pipe &&
-				!pipe_ctx->bottom_pipe && old_pipe->bottom_pipe))
-				continue;
-
-			if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) {
-				if (pipe_ctx->update_flags.bits.scaler ||
-					plane_state->update_flags.bits.scaling_change ||
-					plane_state->update_flags.bits.position_change ||
-					plane_state->update_flags.bits.per_pixel_alpha_change ||
-					pipe_ctx->stream->update_flags.bits.scaling) {
-
-					pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
-					ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
-					/* scaler configuration */
-					pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
-					pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
-				}
-
-				if (pipe_ctx->update_flags.bits.viewport ||
-					(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
-					(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
-					(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
-
-					hubp->funcs->mem_program_viewport(
-						hubp,
-						&pipe_ctx->plane_res.scl_data.viewport,
-						&pipe_ctx->plane_res.scl_data.viewport_c);
-				}
-			}
-		}
-	}
-	return mpcc_disconnected;
-}
-
-void dcn20_wait_for_pending_cleared(struct dc *dc,
-		struct dc_state *context)
-{
-		struct pipe_ctx *pipe_ctx;
-		struct timing_generator *tg;
-		int i;
-
-		for (i = 0; i < dc->res_pool->pipe_count; i++) {
-			pipe_ctx = &context->res_ctx.pipe_ctx[i];
-			tg = pipe_ctx->stream_res.tg;
-
-			/*
-			 * Only wait for top pipe's tg penindg bit
-			 * Also skip if pipe is disabled.
-			 */
-			if (pipe_ctx->top_pipe ||
-			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
-			    !tg->funcs->is_tg_enabled(tg))
-				continue;
-
-			/*
-			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
-			 * For some reason waiting for OTG_UPDATE_PENDING cleared
-			 * seems to not trigger the update right away, and if we
-			 * lock again before VUPDATE then we don't get a separated
-			 * operation.
-			 */
-			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
-			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
-		}
-}
-
 void dcn20_program_front_end_for_ctx(
 		struct dc *dc,
 		struct dc_state *context)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 7befd35a2359..83220e34c1a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -131,13 +131,6 @@ void dcn20_dccg_init(struct dce_hwseq *hws);
 int dcn20_init_sys_ctx(struct dce_hwseq *hws,
 		struct dc *dc,
 		struct dc_phy_addr_space_config *pa_config);
-bool dcn20_disconnect_pipes(
-		struct dc *dc,
-		struct dc_state *context);
-
-void dcn20_wait_for_pending_cleared(struct dc *dc,
-		struct dc_state *context);
-                
 
 #ifndef TRIM_FSFT
 bool dcn20_optimize_timing_for_fsft(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 55afb70d33e5..3dde6f26de47 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -34,8 +34,6 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = NULL,
 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
-	.disconnect_pipes = dcn20_disconnect_pipes,
-	.wait_for_pending_cleared = dcn20_wait_for_pending_cleared,
 	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
 	.update_plane_addr = dcn20_update_plane_addr,
 	.update_dchub = dcn10_update_dchub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 264302b27d4f..b187f71afa65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -35,8 +35,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = NULL,
 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
-	.disconnect_pipes = dcn20_disconnect_pipes,
-	.wait_for_pending_cleared = dcn20_wait_for_pending_cleared,
 	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
 	.update_plane_addr = dcn20_update_plane_addr,
 	.update_dchub = dcn10_update_dchub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 845aaf1c816c..9afee7160490 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -35,8 +35,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
 	.apply_ctx_for_surface = NULL,
 	.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
-	.disconnect_pipes = dcn20_disconnect_pipes,
-	.wait_for_pending_cleared = dcn20_wait_for_pending_cleared,
 	.post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
 	.update_plane_addr = dcn20_update_plane_addr,
 	.update_dchub = dcn10_update_dchub,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 64c1be818b0e..3c986717dcd5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -67,10 +67,6 @@ struct hw_sequencer_funcs {
 			int num_planes, struct dc_state *context);
 	void (*program_front_end_for_ctx)(struct dc *dc,
 			struct dc_state *context);
-	bool (*disconnect_pipes)(struct dc *dc,
-			struct dc_state *context);
-	void (*wait_for_pending_cleared)(struct dc *dc,
-			struct dc_state *context);
 	void (*post_unlock_program_front_end)(struct dc *dc,
 			struct dc_state *context);
 	void (*update_plane_addr)(const struct dc *dc,
-- 
2.17.1


