[PATCH 17/28] drm/amd/display: Move lock to front end programming.

Harry Wentland harry.wentland at amd.com
Thu Oct 19 17:46:36 UTC 2017


From: Yongqiang Sun <yongqiang.sun at amd.com>

Move the lock and unlock into apply_ctx_for_surface, since all
front end programming happens within apply_ctx_for_surface.

Signed-off-by: Yongqiang Sun <yongqiang.sun at amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng at amd.com>
Acked-by: Harry Wentland <Harry.Wentland at amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc.c           | 52 +---------------------
 .../amd/display/dc/dce110/dce110_hw_sequencer.c    | 16 +++++++
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 13 +++++-
 3 files changed, 29 insertions(+), 52 deletions(-)

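For context, the new locking shape is roughly the sketch below. This
is illustrative only (it reuses the dc, pipe_ctx and hwss types
visible in the diff, and the function name here is made up); the real
dce110 implementation additionally checks plane_state transitions
against the current state, and dcn10 locks the timing generator
directly via tg->funcs->lock()/unlock().

	static void apply_ctx_for_surface_sketch(struct dc *dc,
			const struct dc_stream_state *stream,
			struct dc_state *context)
	{
		int i;

		/* Take the pipe lock on the stream's top pipe before any
		 * front end programming, instead of in the callers. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream == stream && !pipe_ctx->top_pipe)
				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
		}

		/* ... all front end (plane) programming for the stream ... */

		/* Release the lock once programming is done, so
		 * dc_commit_state_no_check() and commit_planes_for_stream()
		 * no longer need their own lock/unlock loops. */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream == stream && !pipe_ctx->top_pipe)
				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
		}
	}
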
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0e078c91a674..503817ac0429 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -799,7 +799,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	struct dc_bios *dcb = dc->ctx->dc_bios;
 	enum dc_status result = DC_ERROR_UNEXPECTED;
 	struct pipe_ctx *pipe;
-	int i, j, k, l;
+	int i, k, l;
 	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
 
 	for (i = 0; i < context->stream_count; i++)
@@ -853,15 +853,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-	for (i = 0; i < context->stream_count; i++) {
-		for (j = 0; j < MAX_PIPES; j++) {
-			pipe = &context->res_ctx.pipe_ctx[j];
-
-			if (!pipe->top_pipe && pipe->stream == context->streams[i])
-				dc->hwss.pipe_control_lock(dc, pipe, false);
-		}
-	}
-
 	dc_release_state(dc->current_state);
 
 	dc->current_state = context;
@@ -1265,27 +1256,6 @@ static void commit_planes_for_stream(struct dc *dc,
 		return;
 	}
 
-	/* Lock pipes for provided surfaces, or all active if full update*/
-	for (i = 0; i < surface_count; i++) {
-		struct dc_plane_state *plane_state = srf_updates[i].surface;
-
-		for (j = 0; j < dc->res_pool->pipe_count; j++) {
-			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
-			if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
-				continue;
-			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
-				continue;
-
-			dc->hwss.pipe_control_lock(
-					dc,
-					pipe_ctx,
-					true);
-		}
-		if (update_type == UPDATE_TYPE_FULL)
-			break;
-	}
-
 	/* Full fe update*/
 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -1342,26 +1312,6 @@ static void commit_planes_for_stream(struct dc *dc,
 			}
 		}
 	}
-
-	/* Unlock pipes */
-	for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		for (j = 0; j < surface_count; j++) {
-			if (update_type != UPDATE_TYPE_FULL &&
-			    srf_updates[j].surface != pipe_ctx->plane_state)
-				continue;
-			if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
-				continue;
-
-			dc->hwss.pipe_control_lock(
-					dc,
-					pipe_ctx,
-					false);
-
-			break;
-		}
-	}
 }
 
 void dc_commit_updates_for_stream(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index e8b8daeeed97..73072650530c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2886,8 +2886,14 @@ static void dce110_apply_ctx_for_surface(
 
 	be_idx = -1;
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
 		if (stream == context->res_ctx.pipe_ctx[i].stream) {
 			be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
+			if (!pipe_ctx->top_pipe &&
+				(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
 			break;
 		}
 	}
@@ -2902,6 +2908,16 @@ static void dce110_apply_ctx_for_surface(
 		program_surface_visibility(dc, pipe_ctx);
 
 	}
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if ((stream == pipe_ctx->stream) &&
+			(!pipe_ctx->top_pipe) &&
+			(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+	}
 }
 
 static void dce110_power_down_fe(struct dc *dc, int fe_idx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 63c2f5266142..0d1f0e2a4350 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2550,6 +2550,11 @@ static void dcn10_apply_ctx_for_surface(
 		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
 			continue;
 
+		if (pipe_ctx->stream_res.tg &&
+			pipe_ctx->stream_res.tg->inst == be_idx &&
+			!pipe_ctx->top_pipe)
+			pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
 		/*
 		 * Powergate reused pipes that are not powergated
 		 * fairly hacky right now, using opp_id as indicator
@@ -2605,13 +2610,19 @@ static void dcn10_apply_ctx_for_surface(
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
 
 		if (pipe_ctx->stream != stream)
 			continue;
 
 		/* looking for top pipe to program */
-		if (!pipe_ctx->top_pipe)
+		if (!pipe_ctx->top_pipe) {
 			program_all_pipe_in_tree(dc, pipe_ctx, context);
+			if (pipe_ctx->stream_res.tg &&
+				pipe_ctx->stream_res.tg->inst == be_idx &&
+				(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+				pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+		}
 	}
 
 	dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
-- 
2.14.1


