[PATCH] drm/amd/pm: fix and simplify workload handling
Lazar, Lijo
lijo.lazar at amd.com
Fri Nov 15 11:17:03 UTC 2024
On 11/15/2024 2:36 AM, Alex Deucher wrote:
> smu->workload_mask is IP specific and should not be messed with in
> the common code. The mask bits vary across SMU versions.
>
> Move all handling of smu->workload_mask into the backends and
> simplify the code. Store the user's preference in smu->power_profile_mode
> which will be reflected in sysfs. For internal driver profile
> switches for KFD or VCN, just update the workload mask so that the
> user's preference is retained. Remove all of the extra now unused
> workload related elements in the smu structure.
>
> v2: use refcounts for workload profiles
> v3: rework based on feedback from Lijo
>
> Fixes: 8cc438be5d49 ("drm/amd/pm: correct the workload setting")
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> Cc: Kenneth Feng <kenneth.feng at amd.com>
> Cc: Lijo Lazar <lijo.lazar at amd.com>
> ---
> drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 165 +++++++++---------
> drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 21 ++-
> .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 147 ++++++++--------
> .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 151 ++++++++--------
> .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 150 ++++++++--------
> .../gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 41 ++---
> .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 43 ++---
> .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 153 ++++++++--------
> .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 120 +++++++------
> .../drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 141 ++++++++-------
> drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 38 +++-
> drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 7 +-
> 12 files changed, 614 insertions(+), 563 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index c3a6b6f20455..ab6b30a9df1a 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
> static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
> static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
> static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
> +static void smu_power_profile_mode_get(struct smu_context *smu,
> + enum PP_SMC_POWER_PROFILE profile_mode);
> +static void smu_power_profile_mode_put(struct smu_context *smu,
> + enum PP_SMC_POWER_PROFILE profile_mode);
>
> static int smu_sys_get_pp_feature_mask(void *handle,
> char *buf)
> @@ -1268,9 +1272,6 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
> INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
> atomic64_set(&smu->throttle_int_counter, 0);
> smu->watermarks_bitmap = 0;
> - smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
> - smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
> - smu->user_dpm_profile.user_workload_mask = 0;
>
> for (i = 0; i < adev->vcn.num_vcn_inst; i++)
> atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
> @@ -1278,33 +1279,13 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
> atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
> atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
>
> - smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
> - smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
> - smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
> - smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
> - smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
> - smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
> - smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
> -
> if (smu->is_apu ||
> - !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
> - smu->driver_workload_mask =
> - 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
> - } else {
> - smu->driver_workload_mask =
> - 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
> - smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
> - }
> -
> - smu->workload_mask = smu->driver_workload_mask |
> - smu->user_dpm_profile.user_workload_mask;
> - smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
> - smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
> - smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
> - smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
> - smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
> - smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
> - smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
> + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
> + smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
> + else
> + smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
> + smu_power_profile_mode_get(smu, smu->power_profile_mode);
> +
> smu->display_config = &adev->pm.pm_display_cfg;
>
> smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
> @@ -2140,6 +2121,9 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
> if (!ret)
> adev->gfx.gfx_off_entrycount = count;
>
> + /* clear this on suspend so it will get reprogrammed on resume */
> + smu->frontend_workload_mask = 0;
> +
> return 0;
> }
>
> @@ -2251,26 +2235,46 @@ static int smu_enable_umd_pstate(void *handle,
> return 0;
> }
>
> -static int smu_bump_power_profile_mode(struct smu_context *smu,
> - long *param,
> - uint32_t param_size)
> +static int smu_bump_power_profile_mode(struct smu_context *smu)
> {
> - int ret = 0;
> + u32 workload_mask = 0;
> + int i, ret = 0;
> +
> + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
> + if (smu->workload_refcount[i])
> + workload_mask |= 1 << i;
> + }
> +
> + if (smu->frontend_workload_mask == workload_mask)
> + return 0;
If you notice, smu->backend_workload_mask is not really used. I think
only a single mask is required; at any point, smu->workload_refcount[i]
can be used to derive it. I think we just need to move the above logic
into smu_cmn_get_backend_workload_mask()/smu_cmn_get_workload_mask().
On suspend, only clear smu->workload_mask. During resume,
bump_profile_mode() will be called, and at that point we will have
smu_cmn_get_backend_workload_mask() != smu->workload_mask.
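A rough sketch of what I have in mind; the helper name follows the
suggestion above and the single smu->workload_mask field is the one mask
that would remain, so treat this purely as an illustration, not the
final API:

    /* Illustrative only: derive the mask from the refcounts on demand,
     * so no separate frontend/backend mask needs to be stored.
     */
    static u32 smu_cmn_get_workload_mask(struct smu_context *smu)
    {
            u32 mask = 0;
            int i;

            for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
                    if (smu->workload_refcount[i])
                            mask |= 1 << i;
            }

            return mask;
    }

    /* smu_bump_power_profile_mode() would then only need:
     *
     *     if (smu_cmn_get_workload_mask(smu) == smu->workload_mask)
     *             return 0;
     *
     * and suspend only clears smu->workload_mask, so the comparison
     * fails on resume and the mask gets reprogrammed.
     */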
To check whether a custom profile is requested, this will do:
if (smu->workload_refcount[PP_SMC_POWER_PROFILE_CUSTOM]).
The decision based on smu_cmn_get_backend_workload_mask() !=
smu->workload_mask may be left to the backend.
It's possible that the parameters for custom changed but the mask
remains the same. The current check in bump_profile_mode() doesn't
appear to cover that case.
// custom_param_changed = existing check in this patch.
In the backend we can check
if (smu->workload_refcount[PP_SMC_POWER_PROFILE_CUSTOM] &&
custom_param_changed)
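Expanding on that, something like this in the backend would cover the
case (custom_param_changed stands for the existing comparison in this
patch, and the _coeff helper name is only a placeholder for the per-ASIC
one):

    if (smu->workload_refcount[PP_SMC_POWER_PROFILE_CUSTOM] &&
        custom_param_changed) {
            /* reprogram the activity monitor table even when the
             * resulting workload mask itself did not change
             */
            ret = xxx_set_power_profile_mode_coeff(smu,
                                                   smu->custom_profile_input,
                                                   smu->custom_profile_size);
            if (ret)
                    return ret;
    }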
Also, anytime smu_bump_power_profile_mode() fails we need to do
smu_power_profile_mode_put(smu, type).
I'm not able to see that in the patch; not sure whether it's handled in
a different way.
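For example, in smu_switch_power_profile() something along these lines
would keep the refcounts balanced on failure (the same idea applies to
the sysfs path); again only a sketch, not a request for this exact code:

    if (enable)
            smu_power_profile_mode_get(smu, type);
    else
            smu_power_profile_mode_put(smu, type);
    ret = smu_bump_power_profile_mode(smu);
    if (ret) {
            /* undo the refcount change so the bookkeeping stays
             * consistent when the SMU message fails
             */
            if (enable)
                    smu_power_profile_mode_put(smu, type);
            else
                    smu_power_profile_mode_get(smu, type);
            return ret;
    }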
Thanks,
Lijo
>
> if (smu->ppt_funcs->set_power_profile_mode)
> - ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
> + ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask);
> +
> + if (!ret)
> + smu->frontend_workload_mask = workload_mask;
>
> return ret;
> }
>
> +static void smu_power_profile_mode_get(struct smu_context *smu,
> + enum PP_SMC_POWER_PROFILE profile_mode)
> +{
> + smu->workload_refcount[profile_mode]++;
> +}
> +
> +static void smu_power_profile_mode_put(struct smu_context *smu,
> + enum PP_SMC_POWER_PROFILE profile_mode)
> +{
> + if (smu->workload_refcount[profile_mode])
> + smu->workload_refcount[profile_mode]--;
> +}
> +
> static int smu_adjust_power_state_dynamic(struct smu_context *smu,
> enum amd_dpm_forced_level level,
> - bool skip_display_settings,
> - bool init)
> + bool skip_display_settings)
> {
> int ret = 0;
> - int index = 0;
> - long workload[1];
> struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
>
> if (!skip_display_settings) {
> @@ -2307,14 +2311,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
> }
>
> if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
> - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
> - index = fls(smu->workload_mask);
> - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
> - workload[0] = smu->workload_setting[index];
> -
> - if (init || smu->power_profile_mode != workload[0])
> - smu_bump_power_profile_mode(smu, workload, 0);
> - }
> + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
> + smu_bump_power_profile_mode(smu);
>
> return ret;
> }
> @@ -2333,13 +2331,13 @@ static int smu_handle_task(struct smu_context *smu,
> ret = smu_pre_display_config_changed(smu);
> if (ret)
> return ret;
> - ret = smu_adjust_power_state_dynamic(smu, level, false, false);
> + ret = smu_adjust_power_state_dynamic(smu, level, false);
> break;
> case AMD_PP_TASK_COMPLETE_INIT:
> - ret = smu_adjust_power_state_dynamic(smu, level, true, true);
> + ret = smu_adjust_power_state_dynamic(smu, level, true);
> break;
> case AMD_PP_TASK_READJUST_POWER_STATE:
> - ret = smu_adjust_power_state_dynamic(smu, level, true, false);
> + ret = smu_adjust_power_state_dynamic(smu, level, true);
> break;
> default:
> break;
> @@ -2361,12 +2359,10 @@ static int smu_handle_dpm_task(void *handle,
>
> static int smu_switch_power_profile(void *handle,
> enum PP_SMC_POWER_PROFILE type,
> - bool en)
> + bool enable)
> {
> struct smu_context *smu = handle;
> struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> - long workload[1];
> - uint32_t index;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> return -EOPNOTSUPP;
> @@ -2374,24 +2370,14 @@ static int smu_switch_power_profile(void *handle,
> if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
> return -EINVAL;
>
> - if (!en) {
> - smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
> - index = fls(smu->workload_mask);
> - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
> - workload[0] = smu->workload_setting[index];
> - } else {
> - smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
> - index = fls(smu->workload_mask);
> - index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
> - workload[0] = smu->workload_setting[index];
> - }
> -
> - smu->workload_mask = smu->driver_workload_mask |
> - smu->user_dpm_profile.user_workload_mask;
> -
> if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
> - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
> - smu_bump_power_profile_mode(smu, workload, 0);
> + smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
> + if (enable)
> + smu_power_profile_mode_get(smu, type);
> + else
> + smu_power_profile_mode_put(smu, type);
> + smu_bump_power_profile_mode(smu);
> + }
>
> return 0;
> }
> @@ -3090,21 +3076,44 @@ static int smu_set_power_profile_mode(void *handle,
> uint32_t param_size)
> {
> struct smu_context *smu = handle;
> - int ret;
> + bool custom_changed = false;
> + int ret = 0, i;
>
> if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
> !smu->ppt_funcs->set_power_profile_mode)
> return -EOPNOTSUPP;
>
> - if (smu->user_dpm_profile.user_workload_mask &
> - (1 << smu->workload_priority[param[param_size]]))
> - return 0;
> + if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
> > + if (param_size >= SMU_BACKEND_MAX_CUSTOM_PARAMETERS)
> + return -EINVAL;
> + /* param_size is actually a max index, not an array size */
> + for (i = 0; i <= param_size; i++) {
> + if (smu->custom_profile_input[i] != param[i]) {
> + custom_changed = true;
> + break;
> + }
> + }
> + }
>
> - smu->user_dpm_profile.user_workload_mask =
> - (1 << smu->workload_priority[param[param_size]]);
> - smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
> - smu->driver_workload_mask;
> - ret = smu_bump_power_profile_mode(smu, param, param_size);
> + if ((param[param_size] != smu->power_profile_mode) || custom_changed) {
> + /* save the parameters for custom */
> + if (custom_changed) {
> + /* param_size is actually a max index, not an array size */
> + for (i = 0; i <= param_size; i++)
> + smu->custom_profile_input[i] = param[i];
> + smu->custom_profile_size = param_size;
> > + /* clear frontend mask so custom changes propagate */
> + smu->frontend_workload_mask = 0;
> + }
> + /* clear the old user preference */
> + smu_power_profile_mode_put(smu, smu->power_profile_mode);
> + /* set the new user preference */
> + smu_power_profile_mode_get(smu, param[param_size]);
> + ret = smu_bump_power_profile_mode(smu);
> + if (!ret)
> + /* store the user's preference */
> + smu->power_profile_mode = param[param_size];
> + }
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index fa93a8879113..a9b88072bd05 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -240,7 +240,6 @@ struct smu_user_dpm_profile {
> /* user clock state information */
> uint32_t clk_mask[SMU_CLK_COUNT];
> uint32_t clk_dependency;
> - uint32_t user_workload_mask;
> };
>
> #define SMU_TABLE_INIT(tables, table_id, s, a, d) \
> @@ -510,6 +509,8 @@ enum smu_fw_status {
> */
> #define SMU_WBRF_EVENT_HANDLING_PACE 10
>
> +#define SMU_BACKEND_MAX_CUSTOM_PARAMETERS 11
> +
> struct smu_context {
> struct amdgpu_device *adev;
> struct amdgpu_irq_src irq_source;
> @@ -557,12 +558,16 @@ struct smu_context {
> uint32_t hard_min_uclk_req_from_dal;
> bool disable_uclk_switch;
>
> - uint32_t workload_mask;
> - uint32_t driver_workload_mask;
> - uint32_t workload_priority[WORKLOAD_POLICY_MAX];
> - uint32_t workload_setting[WORKLOAD_POLICY_MAX];
> + /* asic agnostic workload mask */
> + uint32_t frontend_workload_mask;
> + /* asic specific workload mask */
> + uint32_t backend_workload_mask;
> + /* default/user workload preference */
> uint32_t power_profile_mode;
> - uint32_t default_power_profile_mode;
> + uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
> + /* backend specific custom workload settings */
> + long custom_profile_input[SMU_BACKEND_MAX_CUSTOM_PARAMETERS];
> > + uint32_t custom_profile_size;
> bool pm_enabled;
> bool is_apu;
>
> @@ -733,9 +738,9 @@ struct pptable_funcs {
> * @set_power_profile_mode: Set a power profile mode. Also used to
> * create/set custom power profile modes.
> * &input: Power profile mode parameters.
> - * &size: Size of &input.
> + * &workload_mask: mask of workloads to enable
> */
> - int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
> + int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask);
>
> /**
> * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> index 4b36c230e43a..64605cd932ab 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> @@ -1441,97 +1441,98 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
> return size;
> }
>
> -static int arcturus_set_power_profile_mode(struct smu_context *smu,
> - long *input,
> - uint32_t size)
> +static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu,
> + long *input,
> + uint32_t size)
> {
> DpmActivityMonitorCoeffInt_t activity_monitor;
> - int workload_type = 0;
> - uint32_t profile_mode = input[size];
> - int ret = 0;
> + int ret;
>
> - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
> + if (size != 10)
> return -EINVAL;
> +
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> + WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor),
> + false);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> + return ret;
> }
>
> - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
> - (smu->smc_fw_version >= 0x360d00)) {
> - if (size != 10)
> - return -EINVAL;
> + switch (input[0]) {
> + case 0: /* Gfxclk */
> + activity_monitor.Gfx_FPS = input[1];
> + activity_monitor.Gfx_UseRlcBusy = input[2];
> + activity_monitor.Gfx_MinActiveFreqType = input[3];
> + activity_monitor.Gfx_MinActiveFreq = input[4];
> + activity_monitor.Gfx_BoosterFreqType = input[5];
> + activity_monitor.Gfx_BoosterFreq = input[6];
> + activity_monitor.Gfx_PD_Data_limit_c = input[7];
> + activity_monitor.Gfx_PD_Data_error_coeff = input[8];
> + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
> + break;
> + case 1: /* Uclk */
> + activity_monitor.Mem_FPS = input[1];
> + activity_monitor.Mem_UseRlcBusy = input[2];
> + activity_monitor.Mem_MinActiveFreqType = input[3];
> + activity_monitor.Mem_MinActiveFreq = input[4];
> + activity_monitor.Mem_BoosterFreqType = input[5];
> + activity_monitor.Mem_BoosterFreq = input[6];
> + activity_monitor.Mem_PD_Data_limit_c = input[7];
> + activity_monitor.Mem_PD_Data_error_coeff = input[8];
> + activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
> + break;
> + default:
> + return -EINVAL;
> + }
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> - WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor),
> - false);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> - return ret;
> - }
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> + WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor),
> + true);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + return ret;
> + }
>
> - switch (input[0]) {
> - case 0: /* Gfxclk */
> - activity_monitor.Gfx_FPS = input[1];
> - activity_monitor.Gfx_UseRlcBusy = input[2];
> - activity_monitor.Gfx_MinActiveFreqType = input[3];
> - activity_monitor.Gfx_MinActiveFreq = input[4];
> - activity_monitor.Gfx_BoosterFreqType = input[5];
> - activity_monitor.Gfx_BoosterFreq = input[6];
> - activity_monitor.Gfx_PD_Data_limit_c = input[7];
> - activity_monitor.Gfx_PD_Data_error_coeff = input[8];
> - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
> - break;
> - case 1: /* Uclk */
> - activity_monitor.Mem_FPS = input[1];
> - activity_monitor.Mem_UseRlcBusy = input[2];
> - activity_monitor.Mem_MinActiveFreqType = input[3];
> - activity_monitor.Mem_MinActiveFreq = input[4];
> - activity_monitor.Mem_BoosterFreqType = input[5];
> - activity_monitor.Mem_BoosterFreq = input[6];
> - activity_monitor.Mem_PD_Data_limit_c = input[7];
> - activity_monitor.Mem_PD_Data_error_coeff = input[8];
> - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
> - break;
> - default:
> - return -EINVAL;
> - }
> + return ret;
> +}
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> - WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor),
> - true);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> - return ret;
> - }
> - }
> +static int arcturus_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> +{
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
> + int ret;
>
> - /*
> - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
> - * Not all profile modes are supported on arcturus.
> - */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - profile_mode);
> - if (workload_type < 0) {
> - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
> - return -EINVAL;
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
> +
> + if (custom_enabled) {
> + ret = arcturus_set_power_profile_mode_coeff(smu,
> + smu->custom_profile_input,
> + smu->custom_profile_size);
> + if (ret)
> + return ret;
> }
>
> ret = smu_cmn_send_smc_msg_with_param(smu,
> - SMU_MSG_SetWorkloadMask,
> - smu->workload_mask,
> - NULL);
> + SMU_MSG_SetWorkloadMask,
> + backend_workload_mask,
> + NULL);
> if (ret) {
> - dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
> + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
> + workload_mask);
> return ret;
> }
>
> - smu_cmn_assign_power_profile(smu);
> + smu->backend_workload_mask = backend_workload_mask;
>
> - return 0;
> + return ret;
> }
>
> static int arcturus_set_performance_level(struct smu_context *smu,
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index 211635dabed8..8ed446b3458c 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -2006,90 +2006,101 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
> return size;
> }
>
> -static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
> +static int navi10_set_power_profile_mode_coeff(struct smu_context *smu,
> + long *input,
> + uint32_t size)
> {
> DpmActivityMonitorCoeffInt_t activity_monitor;
> - int workload_type, ret = 0;
> + int ret;
> +
> + if (size != 10)
> + return -EINVAL;
>
> - smu->power_profile_mode = input[size];
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor), false);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> + return ret;
> + }
>
> - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
> + switch (input[0]) {
> + case 0: /* Gfxclk */
> + activity_monitor.Gfx_FPS = input[1];
> + activity_monitor.Gfx_MinFreqStep = input[2];
> + activity_monitor.Gfx_MinActiveFreqType = input[3];
> + activity_monitor.Gfx_MinActiveFreq = input[4];
> + activity_monitor.Gfx_BoosterFreqType = input[5];
> + activity_monitor.Gfx_BoosterFreq = input[6];
> + activity_monitor.Gfx_PD_Data_limit_c = input[7];
> + activity_monitor.Gfx_PD_Data_error_coeff = input[8];
> + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
> + break;
> + case 1: /* Socclk */
> + activity_monitor.Soc_FPS = input[1];
> + activity_monitor.Soc_MinFreqStep = input[2];
> + activity_monitor.Soc_MinActiveFreqType = input[3];
> + activity_monitor.Soc_MinActiveFreq = input[4];
> + activity_monitor.Soc_BoosterFreqType = input[5];
> + activity_monitor.Soc_BoosterFreq = input[6];
> + activity_monitor.Soc_PD_Data_limit_c = input[7];
> + activity_monitor.Soc_PD_Data_error_coeff = input[8];
> + activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
> + break;
> + case 2: /* Memclk */
> + activity_monitor.Mem_FPS = input[1];
> + activity_monitor.Mem_MinFreqStep = input[2];
> + activity_monitor.Mem_MinActiveFreqType = input[3];
> + activity_monitor.Mem_MinActiveFreq = input[4];
> + activity_monitor.Mem_BoosterFreqType = input[5];
> + activity_monitor.Mem_BoosterFreq = input[6];
> + activity_monitor.Mem_PD_Data_limit_c = input[7];
> + activity_monitor.Mem_PD_Data_error_coeff = input[8];
> + activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
> + break;
> + default:
> return -EINVAL;
> }
>
> - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
> - if (size != 10)
> - return -EINVAL;
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor), true);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + return ret;
> + }
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor), false);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> - return ret;
> - }
> + return ret;
> +}
>
> - switch (input[0]) {
> - case 0: /* Gfxclk */
> - activity_monitor.Gfx_FPS = input[1];
> - activity_monitor.Gfx_MinFreqStep = input[2];
> - activity_monitor.Gfx_MinActiveFreqType = input[3];
> - activity_monitor.Gfx_MinActiveFreq = input[4];
> - activity_monitor.Gfx_BoosterFreqType = input[5];
> - activity_monitor.Gfx_BoosterFreq = input[6];
> - activity_monitor.Gfx_PD_Data_limit_c = input[7];
> - activity_monitor.Gfx_PD_Data_error_coeff = input[8];
> - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
> - break;
> - case 1: /* Socclk */
> - activity_monitor.Soc_FPS = input[1];
> - activity_monitor.Soc_MinFreqStep = input[2];
> - activity_monitor.Soc_MinActiveFreqType = input[3];
> - activity_monitor.Soc_MinActiveFreq = input[4];
> - activity_monitor.Soc_BoosterFreqType = input[5];
> - activity_monitor.Soc_BoosterFreq = input[6];
> - activity_monitor.Soc_PD_Data_limit_c = input[7];
> - activity_monitor.Soc_PD_Data_error_coeff = input[8];
> - activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
> - break;
> - case 2: /* Memclk */
> - activity_monitor.Mem_FPS = input[1];
> - activity_monitor.Mem_MinFreqStep = input[2];
> - activity_monitor.Mem_MinActiveFreqType = input[3];
> - activity_monitor.Mem_MinActiveFreq = input[4];
> - activity_monitor.Mem_BoosterFreqType = input[5];
> - activity_monitor.Mem_BoosterFreq = input[6];
> - activity_monitor.Mem_PD_Data_limit_c = input[7];
> - activity_monitor.Mem_PD_Data_error_coeff = input[8];
> - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
> - break;
> - default:
> - return -EINVAL;
> - }
> +static int navi10_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> +{
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
> + int ret;
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor), true);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
> +
> + if (custom_enabled) {
> + ret = navi10_set_power_profile_mode_coeff(smu,
> + smu->custom_profile_input,
> + smu->custom_profile_size);
> + if (ret)
> return ret;
> - }
> }
>
> - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - smu->power_profile_mode);
> - if (workload_type < 0)
> - return -EINVAL;
> -
> ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
> - smu->workload_mask, NULL);
> - if (ret)
> - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
> - else
> - smu_cmn_assign_power_profile(smu);
> + backend_workload_mask, NULL);
> + if (ret) {
> + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
> + workload_mask);
> + return ret;
> + }
> +
> + smu->backend_workload_mask = backend_workload_mask;
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 844532a9b641..bea11bbe859c 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -1704,93 +1704,103 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
> return size;
> }
>
> -static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
> +static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu,
> + long *input, uint32_t size)
> {
>
> DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
> DpmActivityMonitorCoeffInt_t *activity_monitor =
> &(activity_monitor_external.DpmActivityMonitorCoeffInt);
> - int workload_type, ret = 0;
> + int ret;
> +
> + if (size != 10)
> + return -EINVAL;
>
> - smu->power_profile_mode = input[size];
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external), false);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> + return ret;
> + }
>
> - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
> + switch (input[0]) {
> + case 0: /* Gfxclk */
> + activity_monitor->Gfx_FPS = input[1];
> + activity_monitor->Gfx_MinFreqStep = input[2];
> + activity_monitor->Gfx_MinActiveFreqType = input[3];
> + activity_monitor->Gfx_MinActiveFreq = input[4];
> + activity_monitor->Gfx_BoosterFreqType = input[5];
> + activity_monitor->Gfx_BoosterFreq = input[6];
> + activity_monitor->Gfx_PD_Data_limit_c = input[7];
> + activity_monitor->Gfx_PD_Data_error_coeff = input[8];
> + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
> + break;
> + case 1: /* Socclk */
> + activity_monitor->Fclk_FPS = input[1];
> + activity_monitor->Fclk_MinFreqStep = input[2];
> + activity_monitor->Fclk_MinActiveFreqType = input[3];
> + activity_monitor->Fclk_MinActiveFreq = input[4];
> + activity_monitor->Fclk_BoosterFreqType = input[5];
> + activity_monitor->Fclk_BoosterFreq = input[6];
> + activity_monitor->Fclk_PD_Data_limit_c = input[7];
> + activity_monitor->Fclk_PD_Data_error_coeff = input[8];
> + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
> + break;
> + case 2: /* Memclk */
> + activity_monitor->Mem_FPS = input[1];
> + activity_monitor->Mem_MinFreqStep = input[2];
> + activity_monitor->Mem_MinActiveFreqType = input[3];
> + activity_monitor->Mem_MinActiveFreq = input[4];
> + activity_monitor->Mem_BoosterFreqType = input[5];
> + activity_monitor->Mem_BoosterFreq = input[6];
> + activity_monitor->Mem_PD_Data_limit_c = input[7];
> + activity_monitor->Mem_PD_Data_error_coeff = input[8];
> + activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
> + break;
> + default:
> return -EINVAL;
> }
>
> - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
> - if (size != 10)
> - return -EINVAL;
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external), true);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + return ret;
> + }
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external), false);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> - return ret;
> - }
> + return ret;
> +}
>
> - switch (input[0]) {
> - case 0: /* Gfxclk */
> - activity_monitor->Gfx_FPS = input[1];
> - activity_monitor->Gfx_MinFreqStep = input[2];
> - activity_monitor->Gfx_MinActiveFreqType = input[3];
> - activity_monitor->Gfx_MinActiveFreq = input[4];
> - activity_monitor->Gfx_BoosterFreqType = input[5];
> - activity_monitor->Gfx_BoosterFreq = input[6];
> - activity_monitor->Gfx_PD_Data_limit_c = input[7];
> - activity_monitor->Gfx_PD_Data_error_coeff = input[8];
> - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
> - break;
> - case 1: /* Socclk */
> - activity_monitor->Fclk_FPS = input[1];
> - activity_monitor->Fclk_MinFreqStep = input[2];
> - activity_monitor->Fclk_MinActiveFreqType = input[3];
> - activity_monitor->Fclk_MinActiveFreq = input[4];
> - activity_monitor->Fclk_BoosterFreqType = input[5];
> - activity_monitor->Fclk_BoosterFreq = input[6];
> - activity_monitor->Fclk_PD_Data_limit_c = input[7];
> - activity_monitor->Fclk_PD_Data_error_coeff = input[8];
> - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
> - break;
> - case 2: /* Memclk */
> - activity_monitor->Mem_FPS = input[1];
> - activity_monitor->Mem_MinFreqStep = input[2];
> - activity_monitor->Mem_MinActiveFreqType = input[3];
> - activity_monitor->Mem_MinActiveFreq = input[4];
> - activity_monitor->Mem_BoosterFreqType = input[5];
> - activity_monitor->Mem_BoosterFreq = input[6];
> - activity_monitor->Mem_PD_Data_limit_c = input[7];
> - activity_monitor->Mem_PD_Data_error_coeff = input[8];
> - activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
> - break;
> - default:
> - return -EINVAL;
> - }
> +static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> +{
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
> + int ret;
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external), true);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
> +
> + if (custom_enabled) {
> + ret = sienna_cichlid_set_power_profile_mode_coeff(smu,
> + smu->custom_profile_input,
> + smu->custom_profile_size);
> + if (ret)
> return ret;
> - }
> }
>
> - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - smu->power_profile_mode);
> - if (workload_type < 0)
> - return -EINVAL;
> -
> ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
> - smu->workload_mask, NULL);
> - if (ret)
> - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
> - else
> - smu_cmn_assign_power_profile(smu);
> + backend_workload_mask, NULL);
> + if (ret) {
> + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
> + workload_mask);
> + return ret;
> + }
> +
> + smu->backend_workload_mask = backend_workload_mask;
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
> index f89c487dce72..279d01f58785 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
> @@ -1056,42 +1056,29 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
> return size;
> }
>
> -static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
> +static int vangogh_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> {
> - int workload_type, ret;
> - uint32_t profile_mode = input[size];
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
> + int ret;
>
> - if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
> - return -EINVAL;
> - }
> -
> - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
> - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
> - return 0;
> -
> - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - profile_mode);
> - if (workload_type < 0) {
> - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
> - profile_mode);
> - return -EINVAL;
> - }
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
>
> ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
> - smu->workload_mask,
> - NULL);
> + backend_workload_mask,
> + NULL);
> if (ret) {
> - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
> - workload_type);
> + dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n",
> + workload_mask);
> return ret;
> }
>
> - smu_cmn_assign_power_profile(smu);
> + smu->backend_workload_mask = backend_workload_mask;
>
> - return 0;
> + return ret;
> }
>
> static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> index 75a9ea87f419..f6d0973506d6 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> @@ -864,44 +864,29 @@ static int renoir_force_clk_levels(struct smu_context *smu,
> return ret;
> }
>
> -static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
> +static int renoir_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> {
> - int workload_type, ret;
> - uint32_t profile_mode = input[size];
> + int ret;
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
>
> - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
> - return -EINVAL;
> - }
> -
> - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
> - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
> - return 0;
> -
> - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - profile_mode);
> - if (workload_type < 0) {
> - /*
> - * TODO: If some case need switch to powersave/default power mode
> - * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
> - */
> - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
> - return -EINVAL;
> - }
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
>
> ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
> - smu->workload_mask,
> - NULL);
> + backend_workload_mask,
> + NULL);
> if (ret) {
> - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
> + dev_err_once(smu->adev->dev, "Failed to set workload mask 0x08%x\n",
> + workload_mask);
> return ret;
> }
>
> - smu_cmn_assign_power_profile(smu);
> + smu->backend_workload_mask = backend_workload_mask;
>
> - return 0;
> + return ret;
> }
>
> static int renoir_set_peak_clock_by_device(struct smu_context *smu)
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
> index 80c6b1e523aa..4bc984cca6cd 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
> @@ -2571,82 +2571,76 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
> return size;
> }
>
> -static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
> - long *input,
> - uint32_t size)
> +static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu,
> + long *input,
> + uint32_t size)
> {
> DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
> DpmActivityMonitorCoeffInt_t *activity_monitor =
> &(activity_monitor_external.DpmActivityMonitorCoeffInt);
> - int workload_type, ret = 0;
> - u32 workload_mask;
> -
> - smu->power_profile_mode = input[size];
> + int ret;
>
> - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
> + if (size != 9)
> return -EINVAL;
> - }
> -
> - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
> - if (size != 9)
> - return -EINVAL;
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> - WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external),
> - false);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> - return ret;
> - }
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> + WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external),
> + false);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> + return ret;
> + }
>
> - switch (input[0]) {
> - case 0: /* Gfxclk */
> - activity_monitor->Gfx_FPS = input[1];
> - activity_monitor->Gfx_MinActiveFreqType = input[2];
> - activity_monitor->Gfx_MinActiveFreq = input[3];
> - activity_monitor->Gfx_BoosterFreqType = input[4];
> - activity_monitor->Gfx_BoosterFreq = input[5];
> - activity_monitor->Gfx_PD_Data_limit_c = input[6];
> - activity_monitor->Gfx_PD_Data_error_coeff = input[7];
> - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
> - break;
> - case 1: /* Fclk */
> - activity_monitor->Fclk_FPS = input[1];
> - activity_monitor->Fclk_MinActiveFreqType = input[2];
> - activity_monitor->Fclk_MinActiveFreq = input[3];
> - activity_monitor->Fclk_BoosterFreqType = input[4];
> - activity_monitor->Fclk_BoosterFreq = input[5];
> - activity_monitor->Fclk_PD_Data_limit_c = input[6];
> - activity_monitor->Fclk_PD_Data_error_coeff = input[7];
> - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
> - break;
> - default:
> - return -EINVAL;
> - }
> + switch (input[0]) {
> + case 0: /* Gfxclk */
> + activity_monitor->Gfx_FPS = input[1];
> + activity_monitor->Gfx_MinActiveFreqType = input[2];
> + activity_monitor->Gfx_MinActiveFreq = input[3];
> + activity_monitor->Gfx_BoosterFreqType = input[4];
> + activity_monitor->Gfx_BoosterFreq = input[5];
> + activity_monitor->Gfx_PD_Data_limit_c = input[6];
> + activity_monitor->Gfx_PD_Data_error_coeff = input[7];
> + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
> + break;
> + case 1: /* Fclk */
> + activity_monitor->Fclk_FPS = input[1];
> + activity_monitor->Fclk_MinActiveFreqType = input[2];
> + activity_monitor->Fclk_MinActiveFreq = input[3];
> + activity_monitor->Fclk_BoosterFreqType = input[4];
> + activity_monitor->Fclk_BoosterFreq = input[5];
> + activity_monitor->Fclk_PD_Data_limit_c = input[6];
> + activity_monitor->Fclk_PD_Data_error_coeff = input[7];
> + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
> + break;
> + default:
> + return -EINVAL;
> + }
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> - WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external),
> - true);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> - return ret;
> - }
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> + WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external),
> + true);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + return ret;
> }
>
> - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - smu->power_profile_mode);
> + return ret;
> +}
>
> - if (workload_type < 0)
> - return -EINVAL;
> +static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> +{
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
> + int workload_type, ret;
>
> - workload_mask = 1 << workload_type;
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
>
> /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
> if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
> @@ -2658,26 +2652,29 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
> CMN2ASIC_MAPPING_WORKLOAD,
> PP_SMC_POWER_PROFILE_POWERSAVING);
> if (workload_type >= 0)
> - workload_mask |= 1 << workload_type;
> + backend_workload_mask |= 1 << workload_type;
> + }
> +
> + if (custom_enabled) {
> + ret = smu_v13_0_0_set_power_profile_mode_coeff(smu,
> + smu->custom_profile_input,
> + smu->custom_profile_size);
> + if (ret)
> + return ret;
> }
>
> - smu->workload_mask |= workload_mask;
> ret = smu_cmn_send_smc_msg_with_param(smu,
> - SMU_MSG_SetWorkloadMask,
> - smu->workload_mask,
> - NULL);
> - if (!ret) {
> - smu_cmn_assign_power_profile(smu);
> - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) {
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - PP_SMC_POWER_PROFILE_FULLSCREEN3D);
> - smu->power_profile_mode = smu->workload_mask & (1 << workload_type)
> - ? PP_SMC_POWER_PROFILE_FULLSCREEN3D
> - : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
> - }
> + SMU_MSG_SetWorkloadMask,
> + backend_workload_mask,
> + NULL);
> + if (ret) {
> + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
> + workload_mask);
> + return ret;
> }
>
> + smu->backend_workload_mask = backend_workload_mask;
> +
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
> index c5d3e25cc967..225629eb9422 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
> @@ -2528,79 +2528,89 @@ do { \
> return result;
> }
>
> -static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
> +static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu,
> + long *input, uint32_t size)
> {
>
> DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
> DpmActivityMonitorCoeffInt_t *activity_monitor =
> &(activity_monitor_external.DpmActivityMonitorCoeffInt);
> - int workload_type, ret = 0;
> + int ret;
> +
> + if (size != 8)
> + return -EINVAL;
>
> - smu->power_profile_mode = input[size];
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external), false);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> + return ret;
> + }
>
> - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
> + switch (input[0]) {
> + case 0: /* Gfxclk */
> + activity_monitor->Gfx_ActiveHystLimit = input[1];
> + activity_monitor->Gfx_IdleHystLimit = input[2];
> + activity_monitor->Gfx_FPS = input[3];
> + activity_monitor->Gfx_MinActiveFreqType = input[4];
> + activity_monitor->Gfx_BoosterFreqType = input[5];
> + activity_monitor->Gfx_MinActiveFreq = input[6];
> + activity_monitor->Gfx_BoosterFreq = input[7];
> + break;
> + case 1: /* Fclk */
> + activity_monitor->Fclk_ActiveHystLimit = input[1];
> + activity_monitor->Fclk_IdleHystLimit = input[2];
> + activity_monitor->Fclk_FPS = input[3];
> + activity_monitor->Fclk_MinActiveFreqType = input[4];
> + activity_monitor->Fclk_BoosterFreqType = input[5];
> + activity_monitor->Fclk_MinActiveFreq = input[6];
> + activity_monitor->Fclk_BoosterFreq = input[7];
> + break;
> + default:
> return -EINVAL;
> }
>
> - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
> - if (size != 8)
> - return -EINVAL;
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external), true);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + return ret;
> + }
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external), false);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> - return ret;
> - }
> + return ret;
> +}
>
> - switch (input[0]) {
> - case 0: /* Gfxclk */
> - activity_monitor->Gfx_ActiveHystLimit = input[1];
> - activity_monitor->Gfx_IdleHystLimit = input[2];
> - activity_monitor->Gfx_FPS = input[3];
> - activity_monitor->Gfx_MinActiveFreqType = input[4];
> - activity_monitor->Gfx_BoosterFreqType = input[5];
> - activity_monitor->Gfx_MinActiveFreq = input[6];
> - activity_monitor->Gfx_BoosterFreq = input[7];
> - break;
> - case 1: /* Fclk */
> - activity_monitor->Fclk_ActiveHystLimit = input[1];
> - activity_monitor->Fclk_IdleHystLimit = input[2];
> - activity_monitor->Fclk_FPS = input[3];
> - activity_monitor->Fclk_MinActiveFreqType = input[4];
> - activity_monitor->Fclk_BoosterFreqType = input[5];
> - activity_monitor->Fclk_MinActiveFreq = input[6];
> - activity_monitor->Fclk_BoosterFreq = input[7];
> - break;
> - default:
> - return -EINVAL;
> - }
> +static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> +{
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
> + int ret;
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external), true);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
> +
> + if (custom_enabled) {
> + ret = smu_v13_0_7_set_power_profile_mode_coeff(smu,
> + smu->custom_profile_input,
> + smu->custom_profile_size);
> + if (ret)
> return ret;
> - }
> }
>
> - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - smu->power_profile_mode);
> - if (workload_type < 0)
> - return -EINVAL;
> -
> ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
> - smu->workload_mask, NULL);
> + backend_workload_mask, NULL);
>
> - if (ret)
> - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
> - else
> - smu_cmn_assign_power_profile(smu);
> + if (ret) {
> + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
> + workload_mask);
> + return ret;
> + }
> +
> + smu->backend_workload_mask = backend_workload_mask;
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
> index 59b369eff30f..272a44b6faf7 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
> @@ -1717,89 +1717,100 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
> return size;
> }
>
> -static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
> - long *input,
> - uint32_t size)
> +static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
> + long *input,
> + uint32_t size)
> {
> DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
> DpmActivityMonitorCoeffInt_t *activity_monitor =
> &(activity_monitor_external.DpmActivityMonitorCoeffInt);
> - int workload_type, ret = 0;
> - uint32_t current_profile_mode = smu->power_profile_mode;
> - smu->power_profile_mode = input[size];
> + int ret;
>
> - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
> - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
> + if (size != 9)
> return -EINVAL;
> +
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> + WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external),
> + false);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> + return ret;
> }
>
> - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
> - if (size != 9)
> - return -EINVAL;
> + switch (input[0]) {
> + case 0: /* Gfxclk */
> + activity_monitor->Gfx_FPS = input[1];
> + activity_monitor->Gfx_MinActiveFreqType = input[2];
> + activity_monitor->Gfx_MinActiveFreq = input[3];
> + activity_monitor->Gfx_BoosterFreqType = input[4];
> + activity_monitor->Gfx_BoosterFreq = input[5];
> + activity_monitor->Gfx_PD_Data_limit_c = input[6];
> + activity_monitor->Gfx_PD_Data_error_coeff = input[7];
> + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
> + break;
> + case 1: /* Fclk */
> + activity_monitor->Fclk_FPS = input[1];
> + activity_monitor->Fclk_MinActiveFreqType = input[2];
> + activity_monitor->Fclk_MinActiveFreq = input[3];
> + activity_monitor->Fclk_BoosterFreqType = input[4];
> + activity_monitor->Fclk_BoosterFreq = input[5];
> + activity_monitor->Fclk_PD_Data_limit_c = input[6];
> + activity_monitor->Fclk_PD_Data_error_coeff = input[7];
> + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
> + break;
> + default:
> + return -EINVAL;
> + }
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> - WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external),
> - false);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
> - return ret;
> - }
> + ret = smu_cmn_update_table(smu,
> + SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> + WORKLOAD_PPLIB_CUSTOM_BIT,
> + (void *)(&activity_monitor_external),
> + true);
> + if (ret) {
> + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> + return ret;
> + }
>
> - switch (input[0]) {
> - case 0: /* Gfxclk */
> - activity_monitor->Gfx_FPS = input[1];
> - activity_monitor->Gfx_MinActiveFreqType = input[2];
> - activity_monitor->Gfx_MinActiveFreq = input[3];
> - activity_monitor->Gfx_BoosterFreqType = input[4];
> - activity_monitor->Gfx_BoosterFreq = input[5];
> - activity_monitor->Gfx_PD_Data_limit_c = input[6];
> - activity_monitor->Gfx_PD_Data_error_coeff = input[7];
> - activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
> - break;
> - case 1: /* Fclk */
> - activity_monitor->Fclk_FPS = input[1];
> - activity_monitor->Fclk_MinActiveFreqType = input[2];
> - activity_monitor->Fclk_MinActiveFreq = input[3];
> - activity_monitor->Fclk_BoosterFreqType = input[4];
> - activity_monitor->Fclk_BoosterFreq = input[5];
> - activity_monitor->Fclk_PD_Data_limit_c = input[6];
> - activity_monitor->Fclk_PD_Data_error_coeff = input[7];
> - activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
> - break;
> - default:
> - return -EINVAL;
> - }
> + return ret;
> +}
>
> - ret = smu_cmn_update_table(smu,
> - SMU_TABLE_ACTIVITY_MONITOR_COEFF,
> - WORKLOAD_PPLIB_CUSTOM_BIT,
> - (void *)(&activity_monitor_external),
> - true);
> - if (ret) {
> - dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
> - return ret;
> - }
> - }
> +static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
> + u32 workload_mask)
> +{
> + u32 backend_workload_mask = 0;
> + bool custom_enabled = false;
> + int ret;
> +
> + smu_cmn_get_backend_workload_mask(smu, workload_mask,
> + &backend_workload_mask,
> + &custom_enabled);
>
> - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
> + /* disable deep sleep if compute is enabled */
> + if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
> smu_v14_0_deep_sleep_control(smu, false);
> - else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
> + else
> smu_v14_0_deep_sleep_control(smu, true);
>
> - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> - workload_type = smu_cmn_to_asic_specific_index(smu,
> - CMN2ASIC_MAPPING_WORKLOAD,
> - smu->power_profile_mode);
> - if (workload_type < 0)
> - return -EINVAL;
> + if (custom_enabled) {
> + ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
> + smu->custom_profile_input,
> + smu->custom_profile_size);
> + if (ret)
> + return ret;
> + }
>
> ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
> - smu->workload_mask, NULL);
> + backend_workload_mask, NULL);
> + if (ret) {
> + dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
> + workload_mask);
> + return ret;
> + }
>
> - if (!ret)
> - smu_cmn_assign_power_profile(smu);
> + smu->backend_workload_mask = backend_workload_mask;
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
> index fd2aa949538e..91a3bf074f78 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
> @@ -1141,14 +1141,6 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
> return ret;
> }
>
> -void smu_cmn_assign_power_profile(struct smu_context *smu)
> -{
> - uint32_t index;
> - index = fls(smu->workload_mask);
> - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
> - smu->power_profile_mode = smu->workload_setting[index];
> -}
> -
> bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
> {
> struct pci_dev *p = NULL;
> @@ -1226,3 +1218,33 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
> {
> policy->desc = &xgmi_plpd_policy_desc;
> }
> +
> +void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
> + u32 workload_mask,
> + u32 *backend_workload_mask,
> + bool *custom_enabled)
> +{
> + int workload_type;
> + u32 profile_mode;
> +
> + *custom_enabled = false;
> + *backend_workload_mask = 0;
> +
> + for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
> + if (!(workload_mask & (1 << profile_mode)))
> + continue;
> +
> + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
> + workload_type = smu_cmn_to_asic_specific_index(smu,
> + CMN2ASIC_MAPPING_WORKLOAD,
> + profile_mode);
> +
> + if (workload_type < 0)
> + continue;
> +
> + *backend_workload_mask |= 1 << workload_type;
> +
> + if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM)
> + *custom_enabled = true;
> + }
> +}
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
> index 8a801e389659..8d40c02efa00 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
> @@ -130,8 +130,6 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
> int smu_cmn_set_mp1_state(struct smu_context *smu,
> enum pp_mp1_state mp1_state);
>
> -void smu_cmn_assign_power_profile(struct smu_context *smu);
> -
> /*
> * Helper function to make sysfs_emit_at() happy. Align buf to
> * the current page boundary and record the offset.
> @@ -149,5 +147,10 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
> void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
> void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);
>
> +void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
> + u32 workload_mask,
> + u32 *backend_workload_mask,
> + bool *custom_enabled);
> +
> #endif
> #endif