[PATCH 3/3] drm/amd/pp: Remove dead error checking code on Vega10
Alex Deucher
alexdeucher at gmail.com
Thu Feb 22 14:31:39 UTC 2018
On Thu, Feb 22, 2018 at 4:31 AM, Rex Zhu <Rex.Zhu at amd.com> wrote:
> When the SMU fails, print out the error info in smumgr immediately
> for debugging. smum_send_msg_to_smu always returns true, so delete
> the error-checking code under hwmgr.
>
> Change-Id: I905073374826ef139d0264bedd590f53b6fd3c17
> Signed-off-by: Rex Zhu <Rex.Zhu at amd.com>
Series is:
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
> ---
> drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 113 ++++++---------------
> .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 22 ++--
> .../gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 10 +-
> 3 files changed, 44 insertions(+), 101 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> index 4c53dab..6c769b3 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> @@ -2868,11 +2868,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
> (struct vega10_hwmgr *)(hwmgr->backend);
> int tmp_result, result = 0;
>
> - tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
> - PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
> - PP_ASSERT_WITH_CODE(!tmp_result,
> - "Failed to configure telemetry!",
> - return tmp_result);
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
>
> smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_NumOfDisplays, 0);
> @@ -2883,13 +2880,9 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
> return 0);
>
> if ((hwmgr->smu_version == 0x001c2c00) ||
> - (hwmgr->smu_version == 0x001c2d00)) {
> - tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
> + (hwmgr->smu_version == 0x001c2d00))
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
> - PP_ASSERT_WITH_CODE(!tmp_result,
> - "Failed to set package power PID!",
> - return tmp_result);
> - }
>
> tmp_result = vega10_construct_voltage_tables(hwmgr);
> PP_ASSERT_WITH_CODE(!tmp_result,
> @@ -3642,12 +3635,10 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
> if (!data->registry_data.sclk_dpm_key_disabled) {
> if (data->smc_state_table.gfx_boot_level !=
> data->dpm_table.gfx_table.dpm_state.soft_min_level) {
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
> + smum_send_msg_to_smc_with_parameter(
> hwmgr,
> PPSMC_MSG_SetSoftMinGfxclkByIndex,
> - data->smc_state_table.gfx_boot_level),
> - "Failed to set soft min sclk index!",
> - return -EINVAL);
> + data->smc_state_table.gfx_boot_level);
> data->dpm_table.gfx_table.dpm_state.soft_min_level =
> data->smc_state_table.gfx_boot_level;
> }
> @@ -3658,19 +3649,15 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
> data->dpm_table.mem_table.dpm_state.soft_min_level) {
> if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
> socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
> + smum_send_msg_to_smc_with_parameter(
> hwmgr,
> PPSMC_MSG_SetSoftMinSocclkByIndex,
> - socclk_idx),
> - "Failed to set soft min uclk index!",
> - return -EINVAL);
> + socclk_idx);
> } else {
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
> + smum_send_msg_to_smc_with_parameter(
> hwmgr,
> PPSMC_MSG_SetSoftMinUclkByIndex,
> - data->smc_state_table.mem_boot_level),
> - "Failed to set soft min uclk index!",
> - return -EINVAL);
> + data->smc_state_table.mem_boot_level);
> }
> data->dpm_table.mem_table.dpm_state.soft_min_level =
> data->smc_state_table.mem_boot_level;
> @@ -3690,12 +3677,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
> if (!data->registry_data.sclk_dpm_key_disabled) {
> if (data->smc_state_table.gfx_max_level !=
> data->dpm_table.gfx_table.dpm_state.soft_max_level) {
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
> - hwmgr,
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetSoftMaxGfxclkByIndex,
> - data->smc_state_table.gfx_max_level),
> - "Failed to set soft max sclk index!",
> - return -EINVAL);
> + data->smc_state_table.gfx_max_level);
> data->dpm_table.gfx_table.dpm_state.soft_max_level =
> data->smc_state_table.gfx_max_level;
> }
> @@ -3704,12 +3688,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
> if (!data->registry_data.mclk_dpm_key_disabled) {
> if (data->smc_state_table.mem_max_level !=
> data->dpm_table.mem_table.dpm_state.soft_max_level) {
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
> - hwmgr,
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetSoftMaxUclkByIndex,
> - data->smc_state_table.mem_max_level),
> - "Failed to set soft max mclk index!",
> - return -EINVAL);
> + data->smc_state_table.mem_max_level);
> data->dpm_table.mem_table.dpm_state.soft_max_level =
> data->smc_state_table.mem_max_level;
> }
> @@ -3779,7 +3760,6 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
> {
> struct vega10_hwmgr *data =
> (struct vega10_hwmgr *)(hwmgr->backend);
> - int result = 0;
> uint32_t low_sclk_interrupt_threshold = 0;
>
> if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
> @@ -3791,12 +3771,12 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
> cpu_to_le32(low_sclk_interrupt_threshold);
>
> /* This message will also enable SmcToHost Interrupt */
> - result = smum_send_msg_to_smc_with_parameter(hwmgr,
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetLowGfxclkInterruptThreshold,
> (uint32_t)low_sclk_interrupt_threshold);
> }
>
> - return result;
> + return 0;
> }
>
> static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
> @@ -3887,11 +3867,7 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
> {
> uint32_t value;
>
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
> - PPSMC_MSG_GetCurrPkgPwr),
> - "Failed to get current package power!",
> - return -EINVAL);
> -
> + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
> vega10_read_arg_from_smc(hwmgr, &value);
>
> /* power value is an integer */
> @@ -3974,10 +3950,10 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
> return ret;
> }
>
> -static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
> +static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
> bool has_disp)
> {
> - return smum_send_msg_to_smc_with_parameter(hwmgr,
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetUclkFastSwitch,
> has_disp ? 0 : 1);
> }
> @@ -4012,7 +3988,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
>
> if (!result) {
> clk_request = (clk_freq << 16) | clk_select;
> - result = smum_send_msg_to_smc_with_parameter(hwmgr,
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_RequestDisplayClockByFreq,
> clk_request);
> }
> @@ -4081,10 +4057,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
> clock_req.clock_type = amd_pp_dcef_clock;
> clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
> if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
> + smum_send_msg_to_smc_with_parameter(
> hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
> - min_clocks.dcefClockInSR /100),
> - "Attempt to set divider for DCEFCLK Failed!",);
> + min_clocks.dcefClockInSR /100);
> } else {
> pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
> }
> @@ -4564,14 +4539,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
> if (data->registry_data.sclk_dpm_key_disabled)
> break;
>
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
> - PPSMC_MSG_GetCurrentGfxclkIndex),
> - "Attempt to get current sclk index Failed!",
> - return -1);
> - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
> - &now),
> - "Attempt to read sclk index Failed!",
> - return -1);
> + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
> + vega10_read_arg_from_smc(hwmgr, &now);
>
> for (i = 0; i < sclk_table->count; i++)
> size += sprintf(buf + size, "%d: %uMhz %s\n",
> @@ -4582,14 +4551,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
> if (data->registry_data.mclk_dpm_key_disabled)
> break;
>
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
> - PPSMC_MSG_GetCurrentUclkIndex),
> - "Attempt to get current mclk index Failed!",
> - return -1);
> - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
> - &now),
> - "Attempt to read mclk index Failed!",
> - return -1);
> + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
> + vega10_read_arg_from_smc(hwmgr, &now);
>
> for (i = 0; i < mclk_table->count; i++)
> size += sprintf(buf + size, "%d: %uMhz %s\n",
> @@ -4597,14 +4560,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
> (i == now) ? "*" : "");
> break;
> case PP_PCIE:
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
> - PPSMC_MSG_GetCurrentLinkIndex),
> - "Attempt to get current mclk index Failed!",
> - return -1);
> - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
> - &now),
> - "Attempt to read mclk index Failed!",
> - return -1);
> + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
> + vega10_read_arg_from_smc(hwmgr, &now);
>
> for (i = 0; i < pcie_table->count; i++)
> size += sprintf(buf + size, "%d: %s %s\n", i,
> @@ -4836,24 +4793,18 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
>
> if (sclk_idx != ~0) {
> if (!data->registry_data.sclk_dpm_key_disabled)
> - PP_ASSERT_WITH_CODE(
> - !smum_send_msg_to_smc_with_parameter(
> + smum_send_msg_to_smc_with_parameter(
> hwmgr,
> PPSMC_MSG_SetSoftMinGfxclkByIndex,
> - sclk_idx),
> - "Failed to set soft min sclk index!",
> - return -EINVAL);
> + sclk_idx);
> }
>
> if (mclk_idx != ~0) {
> if (!data->registry_data.mclk_dpm_key_disabled)
> - PP_ASSERT_WITH_CODE(
> - !smum_send_msg_to_smc_with_parameter(
> + smum_send_msg_to_smc_with_parameter(
> hwmgr,
> PPSMC_MSG_SetSoftMinUclkByIndex,
> - mclk_idx),
> - "Failed to set soft min mclk index!",
> - return -EINVAL);
> + mclk_idx);
> }
>
> return 0;
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
> index 981c9e5..f5ed171 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
> @@ -850,7 +850,6 @@ static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const
> static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
> {
> uint32_t data;
> - int result;
> uint32_t en = (enable ? 1 : 0);
> uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
>
> @@ -924,11 +923,10 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
> }
> }
>
> - if (enable) {
> - /* For Vega10, SMC does not support any mask yet. */
> - result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
> - PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
> - }
> + /* For Vega10, SMC does not support any mask yet. */
> + if (enable)
> + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
> +
> }
>
> static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
> @@ -1344,7 +1342,7 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
> (struct vega10_hwmgr *)(hwmgr->backend);
>
> if (data->registry_data.enable_pkg_pwr_tracking_feature)
> - return smum_send_msg_to_smc_with_parameter(hwmgr,
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetPptLimit, n);
>
> return 0;
> @@ -1406,24 +1404,24 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
> return 0;
> }
>
> -static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
> +static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
> uint32_t adjust_percent)
> {
> - return smum_send_msg_to_smc_with_parameter(hwmgr,
> + smum_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
> }
>
> int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
> {
> - int adjust_percent, result = 0;
> + int adjust_percent;
>
> if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
> adjust_percent =
> hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
> hwmgr->platform_descriptor.TDPAdjustment :
> (-1 * hwmgr->platform_descriptor.TDPAdjustment);
> - result = vega10_set_overdrive_target_percentage(hwmgr,
> + vega10_set_overdrive_target_percentage(hwmgr,
> (uint32_t)adjust_percent);
> }
> - return result;
> + return 0;
> }
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
> index 7491163..994c5481 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
> @@ -31,14 +31,8 @@
>
> static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
> {
> - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
> - PPSMC_MSG_GetCurrentRpm),
> - "Attempt to get current RPM from SMC Failed!",
> - return -1);
> - PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
> - current_rpm),
> - "Attempt to read current RPM from SMC Failed!",
> - return -1);
> + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
> + vega10_read_arg_from_smc(hwmgr, current_rpm);
> return 0;
> }
>
> --
> 1.9.1
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
More information about the amd-gfx
mailing list