[PATCH 18/18] drm/amd/display: init soc bounding box for dcn3.01.

Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Fri Nov 20 20:19:58 UTC 2020


From: Yongqiang Sun <yongqiang.sun@amd.com>

[Why & How]
Update the SoC bounding box and bw bounding box initialization for
DCN3.01, and remove the pp_smu interface, which is not used.
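As a rough illustration of the matching step used by the new
dcn301_update_bw_bounding_box() below: each DCFCLK entry reported in the
clock table is mapped to the closest predefined SoC state whose DCFCLK
does not exceed it (searching from the highest state down), and the
remaining clocks are then copied from that state. The following is a
minimal, standalone sketch of just that search, using simplified types
and made-up clock values rather than the driver structures:

    #include <stdio.h>

    struct soc_state {
    	unsigned int dcfclk_mhz;
    	unsigned int dispclk_mhz;
    };

    int main(void)
    {
    	/* hypothetical predefined SoC states, lowest to highest */
    	struct soc_state soc[] = { {400, 600}, {600, 1000}, {800, 1300} };
    	int num_states = 3;
    	/* hypothetical DCFCLK values from the clock manager's table */
    	unsigned int entries[] = { 450, 700, 900 };
    	int i, j;

    	for (i = 0; i < 3; i++) {
    		int closest = 0;

    		/* loop backwards so the highest state that fits wins */
    		for (j = num_states - 1; j >= 0; j--) {
    			if (soc[j].dcfclk_mhz <= entries[i]) {
    				closest = j;
    				break;
    			}
    		}
    		printf("entry %u MHz -> state %d (dispclk %u MHz)\n",
    		       entries[i], closest, soc[closest].dispclk_mhz);
    	}
    	return 0;
    }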

Signed-off-by: Yongqiang Sun <yongqiang.sun@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
---
 .../amd/display/dc/dcn301/dcn301_resource.c   | 118 +++++++++---------
 1 file changed, 60 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 4b029631a22c..124ae5253d4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1345,9 +1345,6 @@ static void dcn301_destruct(struct dcn301_resource_pool *pool)
 
 	if (pool->base.dccg != NULL)
 		dcn_dccg_destroy(&pool->base.dccg);
-
-	if (pool->base.pp_smu != NULL)
-		dcn301_pp_smu_destroy(&pool->base.pp_smu);
 }
 
 struct hubp *dcn301_hubp_create(
@@ -1600,41 +1597,25 @@ static bool init_soc_bounding_box(struct dc *dc,
 		}
 	}
 
-	if (pool->base.pp_smu) {
-		struct pp_smu_nv_clock_table max_clocks = {0};
-		unsigned int uclk_states[8] = {0};
-		unsigned int num_states = 0;
-		enum pp_smu_status status;
-		bool clock_limits_available = false;
-		bool uclk_states_available = false;
+	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
+	loaded_ip->max_num_dpp = pool->base.pipe_count;
+	dcn20_patch_bounding_box(dc, loaded_bb);
 
-		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
-			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
-				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
+	if (!bb && dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+		struct bp_soc_bb_info bb_info = {0};
 
-			uclk_states_available = (status == PP_SMU_RESULT_OK);
-		}
+		if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
+			if (bb_info.dram_clock_change_latency_100ns > 0)
+				dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
 
-		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
-			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
-					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
-			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock
-			 */
-			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
-				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
-			clock_limits_available = (status == PP_SMU_RESULT_OK);
-		}
+			if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+				dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
 
-		if (clock_limits_available && uclk_states_available && num_states)
-			dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
-		else if (clock_limits_available)
-			dcn20_cap_soc_clocks(loaded_bb, max_clocks);
+			if (bb_info.dram_sr_exit_latency_100ns > 0)
+				dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+		}
 	}
 
-	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
-	loaded_ip->max_num_dpp = pool->base.pipe_count;
-	dcn20_patch_bounding_box(dc, loaded_bb);
-
 	return true;
 }
 
@@ -1682,36 +1663,58 @@ static void set_wm_ranges(
 	pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
 }
 
-static struct pp_smu_funcs *dcn301_pp_smu_create(struct dc_context *ctx)
+static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
-	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
-
-	if (!pp_smu)
-		return pp_smu;
-
-	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && !IS_DIAG_DC(ctx->dce_environment)) {
-		dm_pp_get_funcs(ctx, pp_smu);
-
-		/* TODO: update once we have n21 smu*/
-		if (pp_smu->ctx.ver != PP_SMU_VER_NV)
-			pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
-	}
-
-	return pp_smu;
-}
+	struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
+	struct clk_limit_table *clk_table = &bw_params->clk_table;
+	struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+	unsigned int i, closest_clk_lvl;
+	int j;
+
+	// Default clock levels are used for diags, which may lead to overclocking.
+	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+		dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+		dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
+		dcn3_01_soc.num_chans = bw_params->num_channels;
+
+		ASSERT(clk_table->num_entries);
+		for (i = 0; i < clk_table->num_entries; i++) {
+			/* loop backwards*/
+			for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
+				if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+					closest_clk_lvl = j;
+					break;
+				}
+			}
 
-static void dcn301_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
-{
-	if (pp_smu && *pp_smu) {
-		kfree(*pp_smu);
-		*pp_smu = NULL;
+			clock_limits[i].state = i;
+			clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+			clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+			clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+			clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+			clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+			clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+			clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+			clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+			clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+			clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+			clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+		}
+		for (i = 0; i < clk_table->num_entries; i++)
+			dcn3_01_soc.clock_limits[i] = clock_limits[i];
+		if (clk_table->num_entries) {
+			dcn3_01_soc.num_states = clk_table->num_entries;
+			/* duplicate last level */
+			dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+			dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+		}
 	}
-}
 
-static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
 	dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
 	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+	dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
 static struct resource_funcs dcn301_res_pool_funcs = {
@@ -1862,9 +1865,8 @@ static bool dcn301_resource_construct(
 		goto create_fail;
 	}
 
-	/* PP Lib and SMU interfaces */
-	pool->base.pp_smu = dcn301_pp_smu_create(ctx);
 	init_soc_bounding_box(dc, pool);
+
 	if (!dc->debug.disable_pplib_wm_range && pool->base.pp_smu->nv_funcs.set_wm_ranges)
 		set_wm_ranges(pool->base.pp_smu, &dcn3_01_soc);
 
-- 
2.29.2
