[PATCH 17/20] drm/amd/display: Clean up locking in dcn*_apply_ctx_for_surface()

sunpeng.li at amd.com
Wed Apr 3 21:59:56 UTC 2019


From: Leo Li <sunpeng.li at amd.com>

[Why]

dcn*_disable_plane() no longer unlocks the pipe, so the extra lock-back
after calling it is unnecessary.

In addition, during full plane updates, all necessary pipes should be
locked and unlocked together when modifying hubp to avoid tearing in
pipe-split setups.

[How]

Remove the redundant locks, and add a helper to lock all pipes. If an
interdependent pipe update is required, lock down all pipes. Otherwise,
lock only the top pipe of the updated pipe tree.
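
As a rough sketch (condensed from the diff below; the num_planes == 0
handling and the plane disable logic are omitted), the lock now
brackets the entire programming sequence:

    interdependent_update = top_pipe_to_program->plane_state &&
        top_pipe_to_program->plane_state->update_flags.bits.full_update;

    if (interdependent_update)
        lock_all_pipes(dc, context, true);  /* lock every enabled pipe tree */
    else
        dcn10_pipe_control_lock(dc, top_pipe_to_program, true);

    /* program pipes; hubp_setup_interdependent() on other streams */

    if (interdependent_update)
        lock_all_pipes(dc, context, false);
    else
        dcn10_pipe_control_lock(dc, top_pipe_to_program, false);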

Signed-off-by: Leo Li <sunpeng.li at amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
Acked-by: Nicholas Kazlauskas <Nicholas.Kazlauskas at amd.com>
---
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 66 +++++++++++++++-------
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h  |  4 ++
 2 files changed, 49 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c2b93a8..dab3706 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2329,6 +2329,7 @@ static void dcn10_apply_ctx_for_surface(
 	int i;
 	struct timing_generator *tg;
 	bool removed_pipe[4] = { false };
+	bool interdependent_update = false;
 	struct pipe_ctx *top_pipe_to_program =
 			find_top_pipe_for_stream(dc, context, stream);
 	DC_LOGGER_INIT(dc->ctx->logger);
@@ -2338,7 +2339,13 @@ static void dcn10_apply_ctx_for_surface(
 
 	tg = top_pipe_to_program->stream_res.tg;
 
-	dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
+	interdependent_update = top_pipe_to_program->plane_state &&
+		top_pipe_to_program->plane_state->update_flags.bits.full_update;
+
+	if (interdependent_update)
+		lock_all_pipes(dc, context, true);
+	else
+		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
 
 	if (num_planes == 0) {
 		/* OTG blank before remove all front end */
@@ -2358,15 +2365,9 @@ static void dcn10_apply_ctx_for_surface(
 		 */
 		if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
 			if (old_pipe_ctx->stream_res.tg == tg &&
-				old_pipe_ctx->plane_res.hubp &&
-				old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+			    old_pipe_ctx->plane_res.hubp &&
+			    old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
 				dcn10_disable_plane(dc, old_pipe_ctx);
-				/*
-				 * power down fe will unlock when calling reset, need
-				 * to lock it back here. Messy, need rework.
-				 */
-				pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
-			}
 		}
 
 		if ((!pipe_ctx->plane_state ||
@@ -2385,29 +2386,25 @@ static void dcn10_apply_ctx_for_surface(
 	if (num_planes > 0)
 		program_all_pipe_in_tree(dc, top_pipe_to_program, context);
 
-	dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
-
-	if (top_pipe_to_program->plane_state &&
-			top_pipe_to_program->plane_state->update_flags.bits.full_update)
+	if (interdependent_update)
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-			tg = pipe_ctx->stream_res.tg;
 			/* Skip inactive pipes and ones already updated */
-			if (!pipe_ctx->stream || pipe_ctx->stream == stream
-					|| !pipe_ctx->plane_state
-					|| !tg->funcs->is_tg_enabled(tg))
+			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
+			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
 				continue;
 
-			tg->funcs->lock(tg);
-
 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
 				pipe_ctx->plane_res.hubp,
 				&pipe_ctx->dlg_regs,
 				&pipe_ctx->ttu_regs);
-
-			tg->funcs->unlock(tg);
 		}
 
+	if (interdependent_update)
+		lock_all_pipes(dc, context, false);
+	else
+		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+
 	if (num_planes == 0)
 		false_optc_underflow_wa(dc, stream, tg);
 
 
@@ -2814,6 +2811,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
 	return vertical_line_start;
 }
 
+void lock_all_pipes(struct dc *dc,
+	struct dc_state *context,
+	bool lock)
+{
+	struct pipe_ctx *pipe_ctx;
+	struct timing_generator *tg;
+	int i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		tg = pipe_ctx->stream_res.tg;
+		/*
+		 * Only lock the top pipe's tg to prevent redundant
+		 * (un)locking. Also skip if pipe is disabled.
+		 */
+		if (pipe_ctx->top_pipe ||
+		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
+		    !tg->funcs->is_tg_enabled(tg))
+			continue;
+
+		if (lock)
+			tg->funcs->lock(tg);
+		else
+			tg->funcs->unlock(tg);
+	}
+}
+
 static void calc_vupdate_position(
 		struct pipe_ctx *pipe_ctx,
 		uint32_t *start_line,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6d66084..4b3b27a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
 
 int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
 
+void lock_all_pipes(struct dc *dc,
+	struct dc_state *context,
+	bool lock);
+
 #endif /* __DC_HWSS_DCN10_H__ */
-- 
2.7.4


