[PATCH 6.10 090/263] drm/amdgpu/pm: Fix the param type of set_power_profile_mode
Deucher, Alexander
Alexander.Deucher at amd.com
Mon Aug 19 20:12:55 UTC 2024
[Public]
> -----Original Message-----
> From: Jiri Slaby <jirislaby at kernel.org>
> Sent: Monday, August 19, 2024 3:54 AM
> To: Greg Kroah-Hartman <gregkh at linuxfoundation.org>;
> stable at vger.kernel.org
> Cc: patches at lists.linux.dev; Deucher, Alexander
> <Alexander.Deucher at amd.com>; Sasha Levin <sashal at kernel.org>; Koenig,
> Christian <Christian.Koenig at amd.com>; Pan, Xinhui <Xinhui.Pan at amd.com>;
> amd-gfx at lists.freedesktop.org
> Subject: Re: [PATCH 6.10 090/263] drm/amdgpu/pm: Fix the param type of
> set_power_profile_mode
>
> FTR:
> Delivery has failed to these recipients or groups:
> Ma Jun (Jun.Ma2 at amd.com)
> The email address you entered couldn't be found
>
> So the author of the patch CANNOT respond. Anyone else?
This was a Coverity fix. As to why it was pulled into stable, I think Sasha's scripts picked it up.
Alex
>
> On 19. 08. 24, 9:49, Jiri Slaby wrote:
> > On 12. 08. 24, 18:01, Greg Kroah-Hartman wrote:
> >> 6.10-stable review patch. If anyone has any objections, please let
> >> me know.
> >>
> >> ------------------
> >>
> >> From: Ma Jun <Jun.Ma2 at amd.com>
> >>
> >> [ Upstream commit f683f24093dd94a831085fe0ea8e9dc4c6c1a2d1 ]
> >>
> >> Function .set_power_profile_mode needs an array as its input parameter.
> >
> > Which one and why?
> >
> > static int smu_bump_power_profile_mode(struct smu_context *smu,
> >                                        long *param,
> >                                        uint32_t param_size)
> >
> > int (*set_power_profile_mode)(struct smu_context *smu, long *input,
> >                               uint32_t size);
> >
> > static int pp_set_power_profile_mode(void *handle, long *input,
> >                                      uint32_t size)
> >
> > int (*set_power_profile_mode)(struct pp_hwmgr *hwmgr, long *input,
> >                               uint32_t size);
> >
> > static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr,
> >                                         long *input, uint32_t size)
> > {
> >     int workload_type = 0;
> >     int result = 0;
> >
> >     if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
> >
> >
> > There is absolutely no problem doing input[0] when a pointer to a
> > local non-array variable is passed, is there?
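> >
> > A minimal userspace sketch (mine, not kernel code) of why this is
> > well-defined -- the callee only ever reads input[size], which is
> > input[0] when size == 0:
> >
> > #include <stdio.h>
> >
> > static void set_mode(long *input, unsigned int size)
> > {
> >     /* With size == 0 this reads input[0] -- valid for a scalar. */
> >     if (input[size] > 5)
> >         printf("out of range\n");
> >     else
> >         printf("mode %ld\n", input[size]);
> > }
> >
> > int main(void)
> > {
> >     long workload = 3;      /* scalar, not an array */
> >
> >     set_mode(&workload, 0); /* only input[0] is ever touched */
> >     return 0;
> > }
> >
> > A pointer to a single long behaves exactly like a one-element array
> > here; passing size > 0 would be the actual bug, and the array-of-1
> > change does not prevent that either.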
> >
> >> So define the variable workload as an array to fix the Coverity
> >> warning below.
> >
> > This very much looks like one of many Coverity false positives.
> >
> >> "Passing &workload to function
> >> hwmgr->hwmgr_func->set_power_profile_mode
> >> which uses it as an array. This might corrupt or misinterpret
> >> adjacent memory locations"
> >
> > Care to explain how this fixes anything but a Coverity false positive?
> > Why was this included in a stable tree at all?
> >
> >> Signed-off-by: Ma Jun <Jun.Ma2 at amd.com>
> >> Acked-by: Alex Deucher <alexander.deucher at amd.com>
> >> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> >> Signed-off-by: Sasha Levin <sashal at kernel.org>
> > ...
> >> --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> >> +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
> >> @@ -929,7 +929,7 @@ static int pp_dpm_switch_power_profile(void *handle,
> >>          enum PP_SMC_POWER_PROFILE type, bool en)
> >>  {
> >>      struct pp_hwmgr *hwmgr = handle;
> >> -    long workload;
> >> +    long workload[1];
> >
> > This only obfuscates the code. So please revert this if you cannot
> > explain what real issue this actually fixes.
> >
> >>      uint32_t index;
> >>
> >>      if (!hwmgr || !hwmgr->pm_en)
> >> @@ -947,12 +947,12 @@ static int pp_dpm_switch_power_profile(void *handle,
> >>          hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
> >>          index = fls(hwmgr->workload_mask);
> >>          index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
> >> -        workload = hwmgr->workload_setting[index];
> >> +        workload[0] = hwmgr->workload_setting[index];
> >>      } else {
> >>          hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
> >>          index = fls(hwmgr->workload_mask);
> >>          index = index <= Workload_Policy_Max ? index - 1 : 0;
> >> -        workload = hwmgr->workload_setting[index];
> >> +        workload[0] = hwmgr->workload_setting[index];
> >>      }
> >>
> >>      if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
> >> @@ -962,7 +962,7 @@ static int pp_dpm_switch_power_profile(void *handle,
> >>      }
> >>
> >>      if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
> >> -        hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
> >> +        hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
> >>
> >>      return 0;
> >>  }
> >> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
> >> index 1d829402cd2e2..f4bd8e9357e22 100644
> >> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
> >> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
> >> @@ -269,7 +269,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
> >>          struct pp_power_state *new_ps)
> >>  {
> >>      uint32_t index;
> >> -    long workload;
> >> +    long workload[1];
> >>
> >>      if (hwmgr->not_vf) {
> >>          if (!skip_display_settings)
> >> @@ -294,10 +294,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
> >>      if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
> >>          index = fls(hwmgr->workload_mask);
> >>          index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
> >> -        workload = hwmgr->workload_setting[index];
> >> +        workload[0] = hwmgr->workload_setting[index];
> >>
> >> -        if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
> >> -            hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
> >> +        if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode)
> >> +            hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
> >>      }
> >>
> >>      return 0;
> >> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> >> index e1796ecf9c05c..06409133b09b1 100644
> >> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> >> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> >> @@ -2220,7 +2220,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
> >>  {
> >>      int ret = 0;
> >>      int index = 0;
> >> -    long workload;
> >> +    long workload[1];
> >>      struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> >>
> >>      if (!skip_display_settings) {
> >> @@ -2260,10 +2260,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
> >>          smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
> >>          index = fls(smu->workload_mask);
> >>          index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
> >> -        workload = smu->workload_setting[index];
> >> +        workload[0] = smu->workload_setting[index];
> >>
> >> -        if (smu->power_profile_mode != workload)
> >> -            smu_bump_power_profile_mode(smu, &workload, 0);
> >> +        if (smu->power_profile_mode != workload[0])
> >> +            smu_bump_power_profile_mode(smu, workload, 0);
> >>      }
> >>
> >>      return ret;
> >> @@ -2313,7 +2313,7 @@ static int smu_switch_power_profile(void *handle,
> >>  {
> >>      struct smu_context *smu = handle;
> >>      struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
> >> -    long workload;
> >> +    long workload[1];
> >>      uint32_t index;
> >>
> >>      if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> >> @@ -2326,17 +2326,17 @@ static int smu_switch_power_profile(void *handle,
> >>          smu->workload_mask &= ~(1 << smu->workload_prority[type]);
> >>          index = fls(smu->workload_mask);
> >>          index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
> >> -        workload = smu->workload_setting[index];
> >> +        workload[0] = smu->workload_setting[index];
> >>      } else {
> >>          smu->workload_mask |= (1 << smu->workload_prority[type]);
> >>          index = fls(smu->workload_mask);
> >>          index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
> >> -        workload = smu->workload_setting[index];
> >> +        workload[0] = smu->workload_setting[index];
> >>      }
> >>
> >>      if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
> >>          smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
> >> -        smu_bump_power_profile_mode(smu, &workload, 0);
> >> +        smu_bump_power_profile_mode(smu, workload, 0);
> >>
> >>      return 0;
> >>  }
> >
> > thanks,
>
> --
> js
> suse labs