[PATCH 4/4] drm/amd/powerplay: put those exposed power interfaces in amdgpu_dpm.c
Quan, Evan
Evan.Quan at amd.com
Fri Aug 14 02:56:31 UTC 2020
[AMD Official Use Only - Internal Distribution Only]
Yes, I would like to make another patch to address Nirmoy's comments.
@Das, Nirmoy: is that OK?
BR
Evan
-----Original Message-----
From: Alex Deucher <alexdeucher at gmail.com>
Sent: Thursday, August 13, 2020 9:54 PM
To: Das, Nirmoy <Nirmoy.Das at amd.com>
Cc: Quan, Evan <Evan.Quan at amd.com>; amd-gfx list <amd-gfx at lists.freedesktop.org>; Deucher, Alexander <Alexander.Deucher at amd.com>
Subject: Re: [PATCH 4/4] drm/amd/powerplay: put those exposed power interfaces in amdgpu_dpm.c
Series is:
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>

Nirmoy makes some good points below, but I think those should be addressed in a follow-up patch, as this patch is just moving code around with no actual changes.
Alex
On Thu, Aug 13, 2020 at 6:52 AM Nirmoy <nirmodas at amd.com> wrote:
>
> Acked-by: Nirmoy Das <nirmoy.das at amd.com> for the first 3 patches. Check
> below for more comments.
>
> On 8/13/20 11:08 AM, Evan Quan wrote:
> > As other power interfaces.
> >
> > Change-Id: I5e3b85ae21c4b1d0239f54fa75247b33cfdb7ddc
> > Signed-off-by: Evan Quan <evan.quan at amd.com>
> > ---
> > drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 425 ++++++++++++++++++++++++
> > drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 14 +
> > drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 423 -----------------------
> > drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | 8 -
> > 4 files changed, 439 insertions(+), 431 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> > index 2198148319e2..e114b5cbd8b0 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
> > @@ -28,6 +28,11 @@
> > #include "amdgpu_dpm.h"
> > #include "atom.h"
> > #include "amd_pcie.h"
> > +#include "amdgpu_display.h"
> > +#include "hwmgr.h"
> > +#include <linux/power_supply.h>
> > +
> > +#define WIDTH_4K 3840
> >
> > void amdgpu_dpm_print_class_info(u32 class, u32 class2)
> > {
> > @@ -1262,3 +1267,423 @@ int amdgpu_dpm_smu_i2c_bus_access(struct
> > amdgpu_device *adev,
> >
> > return ret;
> > }
> > +
> > +void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) {
> > + if (adev->pm.dpm_enabled) {
> > + mutex_lock(&adev->pm.mutex);
> > + if (power_supply_is_system_supplied() > 0)
> > + adev->pm.ac_power = true;
> > + else
> > + adev->pm.ac_power = false;
> > + if (adev->powerplay.pp_funcs &&
> > + adev->powerplay.pp_funcs->enable_bapm)
> > + amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
> > + mutex_unlock(&adev->pm.mutex);
> > +
> > + if (is_support_sw_smu(adev))
> > + smu_set_ac_dc(&adev->smu);
> > + }
> > +}
> > +
> > +int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
> > + void *data, uint32_t *size) {
> > + int ret = 0;
> > +
> > + if (!data || !size)
> > + return -EINVAL;
> > +
> > + if (is_support_sw_smu(adev))
> > + ret = smu_read_sensor(&adev->smu, sensor, data, size);
> > + else {
> > + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
> > + ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
> > + sensor, data, size);
> > + else
> > + ret = -EINVAL;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +void amdgpu_dpm_thermal_work_handler(struct work_struct *work) {
> > + struct amdgpu_device *adev =
> > + container_of(work, struct amdgpu_device,
> > + pm.dpm.thermal.work);
> > + /* switch to the thermal state */
> > + enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
> > + int temp, size = sizeof(temp);
> > +
> > + if (!adev->pm.dpm_enabled)
> > + return;
> > +
> > + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
> > + (void *)&temp, &size)) {
> > + if (temp < adev->pm.dpm.thermal.min_temp)
> > + /* switch back the user state */
> > + dpm_state = adev->pm.dpm.user_state;
> > + } else {
> > + if (adev->pm.dpm.thermal.high_to_low)
> > + /* switch back the user state */
> > + dpm_state = adev->pm.dpm.user_state;
> > + }
> > + mutex_lock(&adev->pm.mutex);
> > + if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
> > + adev->pm.dpm.thermal_active = true;
> > + else
> > + adev->pm.dpm.thermal_active = false;
> > + adev->pm.dpm.state = dpm_state;
> > + mutex_unlock(&adev->pm.mutex);
> > +
> > + amdgpu_pm_compute_clocks(adev); }
> > +
> > +static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
> > + enum
> > +amd_pm_state_type dpm_state) {
> > + int i;
> > + struct amdgpu_ps *ps;
> > + u32 ui_class;
> > + bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
> > + true : false;
> > +
> > + /* check if the vblank period is too short to adjust the mclk */
> > + if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
> > + if (amdgpu_dpm_vblank_too_short(adev))
> > + single_display = false;
> > + }
> > +
> > + /* certain older asics have a separare 3D performance state,
> > + * so try that first if the user selected performance
> > + */
> > + if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
> > + dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
> > + /* balanced states don't exist at the moment */
> > + if (dpm_state == POWER_STATE_TYPE_BALANCED)
> > + dpm_state = POWER_STATE_TYPE_PERFORMANCE;
> > +
> > +restart_search:
> > + /* Pick the best power state based on current conditions */
> > + for (i = 0; i < adev->pm.dpm.num_ps; i++) {
> > + ps = &adev->pm.dpm.ps[i];
> > + ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
> > + switch (dpm_state) {
> > + /* user states */
> > + case POWER_STATE_TYPE_BATTERY:
> > + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
> > + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
> > + if (single_display)
> > + return ps;
> > + } else
> > + return ps;
> > + }
> > + break;
> > + case POWER_STATE_TYPE_BALANCED:
> > + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
> > + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
> > + if (single_display)
> > + return ps;
> > + } else
> > + return ps;
> > + }
> > + break;
> > + case POWER_STATE_TYPE_PERFORMANCE:
> > + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
> > + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
> > + if (single_display)
> > + return ps;
> > + } else
> > + return ps;
> > + }
> > + break;
> > + /* internal states */
> > + case POWER_STATE_TYPE_INTERNAL_UVD:
> > + if (adev->pm.dpm.uvd_ps)
> > + return adev->pm.dpm.uvd_ps;
> > + else
>
>
> We don't need the "else" here.
>
> I.e. just (untested; the return already leaves the function, so the else
> adds nothing):
>
>         case POWER_STATE_TYPE_INTERNAL_UVD:
>                 if (adev->pm.dpm.uvd_ps)
>                         return adev->pm.dpm.uvd_ps;
>                 break;
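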
>
>
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_UVD_SD:
> > + if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
> > + return ps;
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_UVD_HD:
> > + if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
> > + return ps;
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
> > + if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
> > + return ps;
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
> > + if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
> > + return ps;
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_BOOT:
> > + return adev->pm.dpm.boot_ps;
> > + case POWER_STATE_TYPE_INTERNAL_THERMAL:
> > + if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
> > + return ps;
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_ACPI:
> > + if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
> > + return ps;
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_ULV:
> > + if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
> > + return ps;
> > + break;
> > + case POWER_STATE_TYPE_INTERNAL_3DPERF:
> > + if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
> > + return ps;
> > + break;
> > + default:
> > + break;
> > + }
> > + }
> > + /* use a fallback state if we didn't match */
> > + switch (dpm_state) {
> > + case POWER_STATE_TYPE_INTERNAL_UVD_SD:
> > + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
> > + goto restart_search;
> > + case POWER_STATE_TYPE_INTERNAL_UVD_HD:
> > + case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
> > + case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
> > + if (adev->pm.dpm.uvd_ps) {
> > + return adev->pm.dpm.uvd_ps;
> > + } else {
> > + dpm_state = POWER_STATE_TYPE_PERFORMANCE;
> > + goto restart_search;
> > + }
> > + case POWER_STATE_TYPE_INTERNAL_THERMAL:
> > + dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
> > + goto restart_search;
> > + case POWER_STATE_TYPE_INTERNAL_ACPI:
> > + dpm_state = POWER_STATE_TYPE_BATTERY;
> > + goto restart_search;
> > + case POWER_STATE_TYPE_BATTERY:
> > + case POWER_STATE_TYPE_BALANCED:
> > + case POWER_STATE_TYPE_INTERNAL_3DPERF:
> > + dpm_state = POWER_STATE_TYPE_PERFORMANCE;
> > + goto restart_search;
>
>
> I think it would be a good idea to move the restart_search block into a
> helper function, so we can more easily rule out any infinite loops from
> the "goto restart_search" usages.
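>
> Roughly something like this (untested sketch, helper name made up):
>
>         static struct amdgpu_ps *
>         amdgpu_dpm_find_matching_ps(struct amdgpu_device *adev,
>                                     enum amd_pm_state_type dpm_state,
>                                     bool single_display)
>         {
>                 int i;
>
>                 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
>                         /* existing switch (dpm_state) matching against
>                          * adev->pm.dpm.ps[i], returning on a hit
>                          */
>                 }
>
>                 return NULL;
>         }
>
> and have the caller step through the fallback states in a bounded loop
> instead of the gotos, so it is obvious the search terminates.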
>
>
>
> > + default:
> > + break;
> > + }
> > +
> > + return NULL;
> > +}
> > +
> > +static void amdgpu_dpm_change_power_state_locked(struct
> > +amdgpu_device *adev) {
> > + struct amdgpu_ps *ps;
> > + enum amd_pm_state_type dpm_state;
> > + int ret;
> > + bool equal = false;
> > +
> > + /* if dpm init failed */
> > + if (!adev->pm.dpm_enabled)
> > + return;
> > +
> > + if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
> > + /* add other state override checks here */
> > + if ((!adev->pm.dpm.thermal_active) &&
> > + (!adev->pm.dpm.uvd_active))
> > + adev->pm.dpm.state = adev->pm.dpm.user_state;
> > + }
> > + dpm_state = adev->pm.dpm.state;
> > +
> > + ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
> > + if (ps)
> > + adev->pm.dpm.requested_ps = ps;
> > + else
> > + return;
>
>
> nitpick:
>
>         if (!ps)
>                 return;
>
>         adev->pm.dpm.requested_ps = ps;
>
>
> > +
> > + if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
> > + printk("switching from power state:\n");
> > + amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
> > + printk("switching to power state:\n");
> > + amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
> > + }
> > +
> > + /* update whether vce is active */
> > + ps->vce_active = adev->pm.dpm.vce_active;
> > + if (adev->powerplay.pp_funcs->display_configuration_changed)
> > + amdgpu_dpm_display_configuration_changed(adev);
> > +
> > + ret = amdgpu_dpm_pre_set_power_state(adev);
> > + if (ret)
> > + return;
> > +
> > + if (adev->powerplay.pp_funcs->check_state_equal) {
> > + if (0 != amdgpu_dpm_check_state_equal(adev,
> > + adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
>
>
> We don't need that check, as amdgpu_dpm_check_state_equal() should not
> modify the (default) value of "equal" on error.
>
>
> Nirmoy
>
> > + equal = false;
> > + }
> > +
> > + if (equal)
> > + return;
> > +
> > + amdgpu_dpm_set_power_state(adev);
> > + amdgpu_dpm_post_set_power_state(adev);
> > +
> > + adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
> > + adev->pm.dpm.current_active_crtc_count =
> > + adev->pm.dpm.new_active_crtc_count;
> > +
> > + if (adev->powerplay.pp_funcs->force_performance_level) {
> > + if (adev->pm.dpm.thermal_active) {
> > + enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
> > + /* force low perf level for thermal */
> > + amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
> > + /* save the user's level */
> > + adev->pm.dpm.forced_level = level;
> > + } else {
> > + /* otherwise, user selected level */
> > + amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
> > + }
> > + }
> > +}
> > +
> > +void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) {
> > + int i = 0;
> > +
> > + if (!adev->pm.dpm_enabled)
> > + return;
> > +
> > + if (adev->mode_info.num_crtc)
> > + amdgpu_display_bandwidth_update(adev);
> > +
> > + for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
> > + struct amdgpu_ring *ring = adev->rings[i];
> > + if (ring && ring->sched.ready)
> > + amdgpu_fence_wait_empty(ring);
> > + }
> > +
> > + if (is_support_sw_smu(adev)) {
> > + struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
> > + smu_handle_task(&adev->smu,
> > + smu_dpm->dpm_level,
> > + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
> > + true);
> > + } else {
> > + if (adev->powerplay.pp_funcs->dispatch_tasks) {
> > + if (!amdgpu_device_has_dc_support(adev)) {
> > + mutex_lock(&adev->pm.mutex);
> > + amdgpu_dpm_get_active_displays(adev);
> > + adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
> > + adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
> > + adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
> > + /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
> > + if (adev->pm.pm_display_cfg.vrefresh > 120)
> > + adev->pm.pm_display_cfg.min_vblank_time = 0;
> > + if (adev->powerplay.pp_funcs->display_configuration_change)
> > + adev->powerplay.pp_funcs->display_configuration_change(
> > + adev->powerplay.pp_handle,
> > + &adev->pm.pm_display_cfg);
> > + mutex_unlock(&adev->pm.mutex);
> > + }
> > + amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
> > + } else {
> > + mutex_lock(&adev->pm.mutex);
> > + amdgpu_dpm_get_active_displays(adev);
> > + amdgpu_dpm_change_power_state_locked(adev);
> > + mutex_unlock(&adev->pm.mutex);
> > + }
> > + }
> > +}
> > +
> > +void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
> > +{
> > + int ret = 0;
> > +
> > + if (adev->family == AMDGPU_FAMILY_SI) {
> > + mutex_lock(&adev->pm.mutex);
> > + if (enable) {
> > + adev->pm.dpm.uvd_active = true;
> > + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
> > + } else {
> > + adev->pm.dpm.uvd_active = false;
> > + }
> > + mutex_unlock(&adev->pm.mutex);
> > +
> > + amdgpu_pm_compute_clocks(adev);
> > + } else {
> > + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
> > + if (ret)
> > + DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
> > + enable ? "enable" : "disable", ret);
> > +
> > + /* enable/disable Low Memory PState for UVD (4k videos) */
> > + if (adev->asic_type == CHIP_STONEY &&
> > + adev->uvd.decode_image_width >= WIDTH_4K) {
> > + struct pp_hwmgr *hwmgr =
> > + adev->powerplay.pp_handle;
> > +
> > + if (hwmgr && hwmgr->hwmgr_func &&
> > + hwmgr->hwmgr_func->update_nbdpm_pstate)
> > + hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
> > + !enable,
> > + true);
> > + }
> > + }
> > +}
> > +
> > +void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
> > +{
> > + int ret = 0;
> > +
> > + if (adev->family == AMDGPU_FAMILY_SI) {
> > + mutex_lock(&adev->pm.mutex);
> > + if (enable) {
> > + adev->pm.dpm.vce_active = true;
> > + /* XXX select vce level based on ring/task */
> > + adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
> > + } else {
> > + adev->pm.dpm.vce_active = false;
> > + }
> > + mutex_unlock(&adev->pm.mutex);
> > +
> > + amdgpu_pm_compute_clocks(adev);
> > + } else {
> > + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
> > + if (ret)
> > + DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
> > + enable ? "enable" : "disable", ret);
> > + }
> > +}
> > +
> > +void amdgpu_pm_print_power_states(struct amdgpu_device *adev) {
> > + int i;
> > +
> > + if (adev->powerplay.pp_funcs->print_power_state == NULL)
> > + return;
> > +
> > + for (i = 0; i < adev->pm.dpm.num_ps; i++)
> > + amdgpu_dpm_print_power_state(adev,
> > + &adev->pm.dpm.ps[i]);
> > +
> > +}
> > +
> > +void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool
> > +enable) {
> > + int ret = 0;
> > +
> > + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
> > + if (ret)
> > + DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
> > + enable ? "enable" : "disable", ret); }
> > +
> > +int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev,
> > +uint32_t *smu_version) {
> > + int r;
> > +
> > + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
> > + r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
> > + if (r) {
> > + pr_err("smu firmware loading failed\n");
> > + return r;
> > + }
> > + *smu_version = adev->pm.fw_version;
> > + }
> > + return 0;
> > +}
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> > index 5a2344f839f2..dff4a5f99bb0 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
> > @@ -548,4 +548,18 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
> > int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
> > bool acquire);
> >
> > +void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
> > +
> > +int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
> > + void *data, uint32_t *size);
> > +
> > +void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
> > +
> > +void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); void
> > +amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
> > +void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool
> > +enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev,
> > +bool enable); void amdgpu_pm_print_power_states(struct
> > +amdgpu_device *adev); int amdgpu_pm_load_smu_firmware(struct
> > +amdgpu_device *adev, uint32_t *smu_version);
> > +
> > #endif
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > index 1705e328c6fc..9874f947e2ad 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> > @@ -29,17 +29,14 @@
> > #include "amdgpu_drv.h"
> > #include "amdgpu_pm.h"
> > #include "amdgpu_dpm.h"
> > -#include "amdgpu_display.h"
> > #include "amdgpu_smu.h"
> > #include "atom.h"
> > -#include <linux/power_supply.h>
> > #include <linux/pci.h>
> > #include <linux/hwmon.h>
> > #include <linux/hwmon-sysfs.h>
> > #include <linux/nospec.h>
> > #include <linux/pm_runtime.h>
> > #include "hwmgr.h"
> > -#define WIDTH_4K 3840
> >
> > static const struct cg_flag_name clocks[] = {
> > {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock
> > Gating"}, @@ -81,45 +78,6 @@ static const struct hwmon_temp_label {
> > {PP_TEMP_MEM, "mem"},
> > };
> >
> > -void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) -{
> > - if (adev->pm.dpm_enabled) {
> > - mutex_lock(&adev->pm.mutex);
> > - if (power_supply_is_system_supplied() > 0)
> > - adev->pm.ac_power = true;
> > - else
> > - adev->pm.ac_power = false;
> > - if (adev->powerplay.pp_funcs &&
> > - adev->powerplay.pp_funcs->enable_bapm)
> > - amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
> > - mutex_unlock(&adev->pm.mutex);
> > -
> > - if (is_support_sw_smu(adev))
> > - smu_set_ac_dc(&adev->smu);
> > - }
> > -}
> > -
> > -int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
> > - void *data, uint32_t *size)
> > -{
> > - int ret = 0;
> > -
> > - if (!data || !size)
> > - return -EINVAL;
> > -
> > - if (is_support_sw_smu(adev))
> > - ret = smu_read_sensor(&adev->smu, sensor, data, size);
> > - else {
> > - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
> > - ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
> > - sensor, data, size);
> > - else
> > - ret = -EINVAL;
> > - }
> > -
> > - return ret;
> > -}
> > -
> > /**
> > * DOC: power_dpm_state
> > *
> > @@ -3636,338 +3594,6 @@ static const struct attribute_group *hwmon_groups[] = {
> > NULL
> > };
> >
> > -void amdgpu_dpm_thermal_work_handler(struct work_struct *work) -{
> > - struct amdgpu_device *adev =
> > - container_of(work, struct amdgpu_device,
> > - pm.dpm.thermal.work);
> > - /* switch to the thermal state */
> > - enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
> > - int temp, size = sizeof(temp);
> > -
> > - if (!adev->pm.dpm_enabled)
> > - return;
> > -
> > - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
> > - (void *)&temp, &size)) {
> > - if (temp < adev->pm.dpm.thermal.min_temp)
> > - /* switch back the user state */
> > - dpm_state = adev->pm.dpm.user_state;
> > - } else {
> > - if (adev->pm.dpm.thermal.high_to_low)
> > - /* switch back the user state */
> > - dpm_state = adev->pm.dpm.user_state;
> > - }
> > - mutex_lock(&adev->pm.mutex);
> > - if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
> > - adev->pm.dpm.thermal_active = true;
> > - else
> > - adev->pm.dpm.thermal_active = false;
> > - adev->pm.dpm.state = dpm_state;
> > - mutex_unlock(&adev->pm.mutex);
> > -
> > - amdgpu_pm_compute_clocks(adev);
> > -}
> > -
> > -static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
> > - enum amd_pm_state_type dpm_state)
> > -{
> > - int i;
> > - struct amdgpu_ps *ps;
> > - u32 ui_class;
> > - bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
> > - true : false;
> > -
> > - /* check if the vblank period is too short to adjust the mclk */
> > - if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
> > - if (amdgpu_dpm_vblank_too_short(adev))
> > - single_display = false;
> > - }
> > -
> > - /* certain older asics have a separare 3D performance state,
> > - * so try that first if the user selected performance
> > - */
> > - if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
> > - dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
> > - /* balanced states don't exist at the moment */
> > - if (dpm_state == POWER_STATE_TYPE_BALANCED)
> > - dpm_state = POWER_STATE_TYPE_PERFORMANCE;
> > -
> > -restart_search:
> > - /* Pick the best power state based on current conditions */
> > - for (i = 0; i < adev->pm.dpm.num_ps; i++) {
> > - ps = &adev->pm.dpm.ps[i];
> > - ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
> > - switch (dpm_state) {
> > - /* user states */
> > - case POWER_STATE_TYPE_BATTERY:
> > - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
> > - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
> > - if (single_display)
> > - return ps;
> > - } else
> > - return ps;
> > - }
> > - break;
> > - case POWER_STATE_TYPE_BALANCED:
> > - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
> > - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
> > - if (single_display)
> > - return ps;
> > - } else
> > - return ps;
> > - }
> > - break;
> > - case POWER_STATE_TYPE_PERFORMANCE:
> > - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
> > - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
> > - if (single_display)
> > - return ps;
> > - } else
> > - return ps;
> > - }
> > - break;
> > - /* internal states */
> > - case POWER_STATE_TYPE_INTERNAL_UVD:
> > - if (adev->pm.dpm.uvd_ps)
> > - return adev->pm.dpm.uvd_ps;
> > - else
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_UVD_SD:
> > - if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
> > - return ps;
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_UVD_HD:
> > - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
> > - return ps;
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
> > - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
> > - return ps;
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
> > - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
> > - return ps;
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_BOOT:
> > - return adev->pm.dpm.boot_ps;
> > - case POWER_STATE_TYPE_INTERNAL_THERMAL:
> > - if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
> > - return ps;
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_ACPI:
> > - if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
> > - return ps;
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_ULV:
> > - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
> > - return ps;
> > - break;
> > - case POWER_STATE_TYPE_INTERNAL_3DPERF:
> > - if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
> > - return ps;
> > - break;
> > - default:
> > - break;
> > - }
> > - }
> > - /* use a fallback state if we didn't match */
> > - switch (dpm_state) {
> > - case POWER_STATE_TYPE_INTERNAL_UVD_SD:
> > - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
> > - goto restart_search;
> > - case POWER_STATE_TYPE_INTERNAL_UVD_HD:
> > - case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
> > - case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
> > - if (adev->pm.dpm.uvd_ps) {
> > - return adev->pm.dpm.uvd_ps;
> > - } else {
> > - dpm_state = POWER_STATE_TYPE_PERFORMANCE;
> > - goto restart_search;
> > - }
> > - case POWER_STATE_TYPE_INTERNAL_THERMAL:
> > - dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
> > - goto restart_search;
> > - case POWER_STATE_TYPE_INTERNAL_ACPI:
> > - dpm_state = POWER_STATE_TYPE_BATTERY;
> > - goto restart_search;
> > - case POWER_STATE_TYPE_BATTERY:
> > - case POWER_STATE_TYPE_BALANCED:
> > - case POWER_STATE_TYPE_INTERNAL_3DPERF:
> > - dpm_state = POWER_STATE_TYPE_PERFORMANCE;
> > - goto restart_search;
> > - default:
> > - break;
> > - }
> > -
> > - return NULL;
> > -}
> > -
> > -static void amdgpu_dpm_change_power_state_locked(struct
> > amdgpu_device *adev) -{
> > - struct amdgpu_ps *ps;
> > - enum amd_pm_state_type dpm_state;
> > - int ret;
> > - bool equal = false;
> > -
> > - /* if dpm init failed */
> > - if (!adev->pm.dpm_enabled)
> > - return;
> > -
> > - if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
> > - /* add other state override checks here */
> > - if ((!adev->pm.dpm.thermal_active) &&
> > - (!adev->pm.dpm.uvd_active))
> > - adev->pm.dpm.state = adev->pm.dpm.user_state;
> > - }
> > - dpm_state = adev->pm.dpm.state;
> > -
> > - ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
> > - if (ps)
> > - adev->pm.dpm.requested_ps = ps;
> > - else
> > - return;
> > -
> > - if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
> > - printk("switching from power state:\n");
> > - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
> > - printk("switching to power state:\n");
> > - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
> > - }
> > -
> > - /* update whether vce is active */
> > - ps->vce_active = adev->pm.dpm.vce_active;
> > - if (adev->powerplay.pp_funcs->display_configuration_changed)
> > - amdgpu_dpm_display_configuration_changed(adev);
> > -
> > - ret = amdgpu_dpm_pre_set_power_state(adev);
> > - if (ret)
> > - return;
> > -
> > - if (adev->powerplay.pp_funcs->check_state_equal) {
> > - if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
> > - equal = false;
> > - }
> > -
> > - if (equal)
> > - return;
> > -
> > - amdgpu_dpm_set_power_state(adev);
> > - amdgpu_dpm_post_set_power_state(adev);
> > -
> > - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
> > - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
> > -
> > - if (adev->powerplay.pp_funcs->force_performance_level) {
> > - if (adev->pm.dpm.thermal_active) {
> > - enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
> > - /* force low perf level for thermal */
> > - amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
> > - /* save the user's level */
> > - adev->pm.dpm.forced_level = level;
> > - } else {
> > - /* otherwise, user selected level */
> > - amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
> > - }
> > - }
> > -}
> > -
> > -void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
> > -{
> > - int ret = 0;
> > -
> > - if (adev->family == AMDGPU_FAMILY_SI) {
> > - mutex_lock(&adev->pm.mutex);
> > - if (enable) {
> > - adev->pm.dpm.uvd_active = true;
> > - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
> > - } else {
> > - adev->pm.dpm.uvd_active = false;
> > - }
> > - mutex_unlock(&adev->pm.mutex);
> > -
> > - amdgpu_pm_compute_clocks(adev);
> > - } else {
> > - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
> > - if (ret)
> > - DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
> > - enable ? "enable" : "disable", ret);
> > -
> > - /* enable/disable Low Memory PState for UVD (4k videos) */
> > - if (adev->asic_type == CHIP_STONEY &&
> > - adev->uvd.decode_image_width >= WIDTH_4K) {
> > - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
> > -
> > - if (hwmgr && hwmgr->hwmgr_func &&
> > - hwmgr->hwmgr_func->update_nbdpm_pstate)
> > - hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
> > - !enable,
> > - true);
> > - }
> > - }
> > -}
> > -
> > -void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
> > -{
> > - int ret = 0;
> > -
> > - if (adev->family == AMDGPU_FAMILY_SI) {
> > - mutex_lock(&adev->pm.mutex);
> > - if (enable) {
> > - adev->pm.dpm.vce_active = true;
> > - /* XXX select vce level based on ring/task */
> > - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
> > - } else {
> > - adev->pm.dpm.vce_active = false;
> > - }
> > - mutex_unlock(&adev->pm.mutex);
> > -
> > - amdgpu_pm_compute_clocks(adev);
> > - } else {
> > - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
> > - if (ret)
> > - DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
> > - enable ? "enable" : "disable", ret);
> > - }
> > -}
> > -
> > -void amdgpu_pm_print_power_states(struct amdgpu_device *adev) -{
> > - int i;
> > -
> > - if (adev->powerplay.pp_funcs->print_power_state == NULL)
> > - return;
> > -
> > - for (i = 0; i < adev->pm.dpm.num_ps; i++)
> > - amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
> > -
> > -}
> > -
> > -void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool
> > enable) -{
> > - int ret = 0;
> > -
> > - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
> > - if (ret)
> > - DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
> > - enable ? "enable" : "disable", ret);
> > -}
> > -
> > -int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev,
> > uint32_t *smu_version) -{
> > - int r;
> > -
> > - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
> > - r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
> > - if (r) {
> > - pr_err("smu firmware loading failed\n");
> > - return r;
> > - }
> > - *smu_version = adev->pm.fw_version;
> > - }
> > - return 0;
> > -}
> > -
> > int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
> > {
> > int ret;
> > @@ -4028,55 +3654,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
> > amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
> > }
> >
> > -void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) -{
> > - int i = 0;
> > -
> > - if (!adev->pm.dpm_enabled)
> > - return;
> > -
> > - if (adev->mode_info.num_crtc)
> > - amdgpu_display_bandwidth_update(adev);
> > -
> > - for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
> > - struct amdgpu_ring *ring = adev->rings[i];
> > - if (ring && ring->sched.ready)
> > - amdgpu_fence_wait_empty(ring);
> > - }
> > -
> > - if (is_support_sw_smu(adev)) {
> > - struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
> > - smu_handle_task(&adev->smu,
> > - smu_dpm->dpm_level,
> > - AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
> > - true);
> > - } else {
> > - if (adev->powerplay.pp_funcs->dispatch_tasks) {
> > - if (!amdgpu_device_has_dc_support(adev)) {
> > - mutex_lock(&adev->pm.mutex);
> > - amdgpu_dpm_get_active_displays(adev);
> > - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
> > - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
> > - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
> > - /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
> > - if (adev->pm.pm_display_cfg.vrefresh > 120)
> > - adev->pm.pm_display_cfg.min_vblank_time = 0;
> > - if (adev->powerplay.pp_funcs->display_configuration_change)
> > - adev->powerplay.pp_funcs->display_configuration_change(
> > - adev->powerplay.pp_handle,
> > - &adev->pm.pm_display_cfg);
> > - mutex_unlock(&adev->pm.mutex);
> > - }
> > - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
> > - } else {
> > - mutex_lock(&adev->pm.mutex);
> > - amdgpu_dpm_get_active_displays(adev);
> > - amdgpu_dpm_change_power_state_locked(adev);
> > - mutex_unlock(&adev->pm.mutex);
> > - }
> > - }
> > -}
> > -
> > /*
> > * Debugfs info
> > */
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
> > b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
> > index d9ae2b49a402..45a22e101d15 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
> > @@ -79,18 +79,10 @@ struct amdgpu_device_attr_entry {
> > amdgpu_get_##_name, NULL, \
> > _flags, ##__VA_ARGS__)
> >
> > -void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
> > int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
> > int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
> > void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
> > void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev); -void
> > amdgpu_pm_print_power_states(struct amdgpu_device *adev); -int
> > amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t
> > *smu_version); -void amdgpu_pm_compute_clocks(struct amdgpu_device
> > *adev); -void amdgpu_dpm_thermal_work_handler(struct work_struct
> > *work); -void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool
> > enable); -void amdgpu_dpm_enable_vce(struct amdgpu_device *adev,
> > bool enable); -void amdgpu_dpm_enable_jpeg(struct amdgpu_device
> > *adev, bool enable);
> >
> > int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
> >
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx