[PATCH 14/51] drm/amd/display: get rid of cur_clks from dcn_bw_output
From: Harry Wentland <harry.wentland@amd.com>
Date: Tue Jun 19 21:10:21 UTC 2018

From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Cleans up dcn_bw_output so that it only contains calculated info;
actual programmed values are now stored in their respective blocks.
Change-Id: I8d5139ba4bea9e6738bd6d8bd8e45ec82477c276
Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Nikola Cornij <Nikola.Cornij@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
---
.../gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 28 +++---
.../gpu/drm/amd/display/dc/core/dc_debug.c | 24 +++---
drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +-
.../gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 +-
.../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 85 +++++++++----------
.../gpu/drm/amd/display/dc/inc/core_types.h | 3 +-
6 files changed, 72 insertions(+), 74 deletions(-)
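
For readers skimming the diffstat, here is a minimal sketch of the data-structure
change that drives the rest of the patch (editor's illustration only, not part of
the patch itself; the _before/_after suffixes are hypothetical, and only the
dc_clocks members touched by this series are implied):

struct dcn_bw_output_before {           /* core_types.h prior to this patch */
	struct dc_clocks cur_clk;       /* last values programmed to hardware */
	struct dc_clocks calc_clk;      /* values computed by dcn_validate_bandwidth() */
	struct dcn_watermark_set watermarks;
};

struct dcn_bw_output_after {            /* core_types.h after this patch */
	struct dc_clocks clk;           /* calculated values only */
	struct dcn_watermark_set watermarks;
};
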
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 9ce329e8f287..b8195e5a0676 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -977,42 +977,42 @@ bool dcn_validate_bandwidth(
display_pipe_configuration(v);
calc_wm_sets_and_perf_params(context, v);
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.calc_clk.dispclk_khz <
+ if (context->bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.calc_clk.dispclk_khz =
+ context->bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
- context->bw.dcn.calc_clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 267c76766dea..e1ebdf7b5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -352,19 +352,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.calc_clk.dispclk_khz,
- context->bw.dcn.calc_clk.dppclk_khz,
- context->bw.dcn.calc_clk.dcfclk_khz,
- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.calc_clk.fclk_khz,
- context->bw.dcn.calc_clk.socclk_khz);
+ context->bw.dcn.clk.dispclk_khz,
+ context->bw.dcn.clk.dppclk_khz,
+ context->bw.dcn.clk.dcfclk_khz,
+ context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.clk.fclk_khz,
+ context->bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 42fa895964ea..2f9c23d94b50 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1284,7 +1284,7 @@ static enum dc_status enable_link_dp(
max_link_rate = LINK_RATE_HIGH3;
if (link_settings.link_rate == max_link_rate) {
- struct dc_clocks clocks = state->bw.dcn.calc_clk;
+ struct dc_clocks clocks = state->bw.dcn.clk;
/* dce/dcn compat, do not update dispclk */
clocks.dispclk_khz = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 93e6063c4b97..6b6570ea998d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -584,8 +584,8 @@ static void dcn_update_clocks(struct dccg *dccg,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
- /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);*/
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;*/
dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
send_request_to_lower = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index eae2fd7692da..2c15854f08e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -337,13 +337,13 @@ void dcn10_log_hw_state(struct dc *dc)
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
- dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.calc_clk.dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.calc_clk.fclk_khz,
- dc->current_state->bw.dcn.calc_clk.socclk_khz);
+ dc->current_state->bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw.dcn.clk.fclk_khz,
+ dc->current_state->bw.dcn.clk.socclk_khz);
log_mpc_crc(dc);
@@ -1967,18 +1967,17 @@ static void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
- context->bw.dcn.cur_clk.dispclk_khz / 2;
+ bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ dc->res_pool->dccg->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
should_divided_by_2,
true);
- dc->current_state->bw.dcn.cur_clk.dppclk_khz =
- should_divided_by_2 ?
- context->bw.dcn.cur_clk.dispclk_khz / 2 :
- context->bw.dcn.cur_clk.dispclk_khz;
+ dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
+ dc->res_pool->dccg->clks.dispclk_khz / 2 :
+ dc->res_pool->dccg->clks.dispclk_khz;
}
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2173,7 +2172,7 @@ static void dcn10_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
@@ -2378,29 +2377,29 @@ static void dcn10_apply_ctx_for_surface(
static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
{
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
- bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dispclk_khz;
- int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
- bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
- context->bw.dcn.cur_clk.dppclk_khz;
+ bool request_dpp_div = context->bw.dcn.clk.dispclk_khz >
+ context->bw.dcn.clk.dppclk_khz;
+ bool dispclk_increase = context->bw.dcn.clk.dispclk_khz >
+ dc->res_pool->dccg->clks.dispclk_khz;
+ int disp_clk_threshold = context->bw.dcn.clk.max_supported_dppclk_khz;
+ bool cur_dpp_div = dc->res_pool->dccg->clks.dispclk_khz >
+ dc->res_pool->dccg->clks.dppclk_khz;
/* increase clock, looking for div is 0 for current, request div is 1*/
if (dispclk_increase) {
/* already divided by 2, no need to reach target clk with 2 steps*/
if (cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
+ return context->bw.dcn.clk.dispclk_khz;
/* request disp clk is lower than maximum supported dpp clk,
* no need to reach target clk with two steps.
*/
- if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
+ if (context->bw.dcn.clk.dispclk_khz <= disp_clk_threshold)
+ return context->bw.dcn.clk.dispclk_khz;
/* target dpp clk not request divided by 2, still within threshold */
if (!request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
+ return context->bw.dcn.clk.dispclk_khz;
} else {
/* decrease clock, looking for current dppclk divided by 2,
@@ -2409,17 +2408,17 @@ static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
/* current dpp clk not divided by 2, no need to ramp*/
if (!cur_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
+ return context->bw.dcn.clk.dispclk_khz;
/* current disp clk is lower than current maximum dpp clk,
* no need to ramp
*/
- if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
- return context->bw.dcn.calc_clk.dispclk_khz;
+ if (dc->res_pool->dccg->clks.dispclk_khz <= disp_clk_threshold)
+ return context->bw.dcn.clk.dispclk_khz;
/* request dpp clk need to be divided by 2 */
if (request_dpp_div)
- return context->bw.dcn.calc_clk.dispclk_khz;
+ return context->bw.dcn.clk.dispclk_khz;
}
return disp_clk_threshold;
@@ -2428,8 +2427,8 @@ static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
{
int i;
- bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
- context->bw.dcn.calc_clk.dppclk_khz;
+ bool request_dpp_div = context->bw.dcn.clk.dispclk_khz >
+ context->bw.dcn.clk.dppclk_khz;
int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
@@ -2452,18 +2451,18 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
}
/* If target clk not same as dppclk threshold, set to target clock */
- if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
+ if (dispclk_to_dpp_threshold != context->bw.dcn.clk.dispclk_khz) {
dc->res_pool->dccg->funcs->set_dispclk(
dc->res_pool->dccg,
- context->bw.dcn.calc_clk.dispclk_khz);
+ context->bw.dcn.clk.dispclk_khz);
}
- context->bw.dcn.cur_clk.dispclk_khz =
- context->bw.dcn.calc_clk.dispclk_khz;
- context->bw.dcn.cur_clk.dppclk_khz =
- context->bw.dcn.calc_clk.dppclk_khz;
- context->bw.dcn.cur_clk.max_supported_dppclk_khz =
- context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+ dc->res_pool->dccg->clks.dispclk_khz =
+ context->bw.dcn.clk.dispclk_khz;
+ dc->res_pool->dccg->clks.dppclk_khz =
+ context->bw.dcn.clk.dppclk_khz;
+ dc->res_pool->dccg->clks.max_supported_dppclk_khz =
+ context->bw.dcn.clk.max_supported_dppclk_khz;
}
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
@@ -2484,11 +2483,11 @@ static void dcn10_set_bandwidth(
return;
if (context->stream_count == 0)
- context->bw.dcn.calc_clk.phyclk_khz = 0;
+ context->bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->dccg->funcs->update_clocks(
dc->res_pool->dccg,
- &context->bw.dcn.calc_clk,
+ &context->bw.dcn.clk,
decrease_allowed);
/* make sure dcf clk is before dpp clk to
@@ -2496,8 +2495,8 @@ static void dcn10_set_bandwidth(
*/
if (should_set_clock(
decrease_allowed,
- context->bw.dcn.calc_clk.dispclk_khz,
- dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
+ context->bw.dcn.clk.dispclk_khz,
+ dc->res_pool->dccg->clks.dispclk_khz)) {
ramp_up_dispclk_with_dpp(dc, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 44c48f3d0a1d..00d728e629fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -256,8 +256,7 @@ struct dce_bw_output {
};
struct dcn_bw_output {
- struct dc_clocks cur_clk;
- struct dc_clocks calc_clk;
+ struct dc_clocks clk;
struct dcn_watermark_set watermarks;
};
--
2.17.1
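
[Editor's note] As a rough summary of the convention this patch establishes: the
validated state keeps only calculated clocks, while the last programmed values are
read from the dccg block. A sketch under that assumption follows; dispclk_needs_ramp()
is a hypothetical helper written for illustration, not something the patch adds.

/* Calculated clocks stay in the validated state; programmed clocks live in
 * the display clock generator (dccg) block shared across states.
 */
static bool dispclk_needs_ramp(struct dc *dc, struct dc_state *context)
{
	int calculated = context->bw.dcn.clk.dispclk_khz;       /* from dcn_validate_bandwidth() */
	int programmed = dc->res_pool->dccg->clks.dispclk_khz;  /* last value written to hardware */

	return calculated > programmed;
}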