[PATCH v5 23/27] drm/amd/powerplay: Scope all PM queued work with drm_dev_enter/exit
Andrey Grodzovsky
andrey.grodzovsky at amd.com
Wed Apr 28 15:12:03 UTC 2021
Allow already-queued PM work to complete, and block any further HW
accesses, after the device has been removed from the PCI bus.
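
In short, each deferred-work handler that can touch the HW now brackets
its body with drm_dev_enter()/drm_dev_exit() (from <drm/drm_drv.h>), so
the handler completes as a no-op once drm_dev_unplug() has been called
on PCI remove. A minimal sketch of the pattern, with a hypothetical
example_work_fn standing in for the real handlers changed below:

	static void example_work_fn(struct work_struct *work)
	{
		struct amdgpu_device *adev =
			container_of(work, struct amdgpu_device,
				     pm.dpm.thermal.work);
		int idx;

		/* drm_dev_enter() returns false after drm_dev_unplug() */
		if (!drm_dev_enter(&adev->ddev, &idx))
			return;

		/* HW access is safe within this critical section */

		drm_dev_exit(idx);
	}

The handlers below keep their existing bodies and simply wrap them in
the drm_dev_enter()/drm_dev_exit() critical section.
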
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky at amd.com>
---
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c       | 44 +++++++++++++----------
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 26 +++++++++++---
 2 files changed, 47 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 8fb12afe3c96..649e10d52d17 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -31,6 +31,7 @@
 #include "amdgpu_display.h"
 #include "hwmgr.h"
 #include <linux/power_supply.h>
+#include <drm/drm_drv.h>
 
 #define WIDTH_4K 3840
 
@@ -1316,29 +1317,36 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
 	/* switch to the thermal state */
 	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
 	int temp, size = sizeof(temp);
+	int idx;
 
 	if (!adev->pm.dpm_enabled)
 		return;
 
-	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
-				    (void *)&temp, &size)) {
-		if (temp < adev->pm.dpm.thermal.min_temp)
-			/* switch back the user state */
-			dpm_state = adev->pm.dpm.user_state;
-	} else {
-		if (adev->pm.dpm.thermal.high_to_low)
-			/* switch back the user state */
-			dpm_state = adev->pm.dpm.user_state;
-	}
-	mutex_lock(&adev->pm.mutex);
-	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
-		adev->pm.dpm.thermal_active = true;
-	else
-		adev->pm.dpm.thermal_active = false;
-	adev->pm.dpm.state = dpm_state;
-	mutex_unlock(&adev->pm.mutex);
+	if (drm_dev_enter(&adev->ddev, &idx)) {
 
-	amdgpu_pm_compute_clocks(adev);
+		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+					    (void *)&temp, &size)) {
+			if (temp < adev->pm.dpm.thermal.min_temp)
+				/* switch back the user state */
+				dpm_state = adev->pm.dpm.user_state;
+		} else {
+			if (adev->pm.dpm.thermal.high_to_low)
+				/* switch back the user state */
+				dpm_state = adev->pm.dpm.user_state;
+		}
+		mutex_lock(&adev->pm.mutex);
+		if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+			adev->pm.dpm.thermal_active = true;
+		else
+			adev->pm.dpm.thermal_active = false;
+		adev->pm.dpm.state = dpm_state;
+		mutex_unlock(&adev->pm.mutex);
+
+		amdgpu_pm_compute_clocks(adev);
+
+		drm_dev_exit(idx);
+
+	}
 }
 
 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index d143ef1b460b..f034c8a5eb44 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -25,6 +25,8 @@
 #include <linux/firmware.h>
 #include <linux/pci.h>
 
+#include <drm/drm_drv.h>
+
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "smu_internal.h"
@@ -904,21 +906,35 @@ static void smu_throttling_logging_work_fn(struct work_struct *work)
 {
 	struct smu_context *smu = container_of(work, struct smu_context,
 					       throttling_logging_work);
+	int idx;
+
+
+	if (drm_dev_enter(&smu->adev->ddev, &idx)) {
+
+		smu_log_thermal_throttling(smu);
 
-	smu_log_thermal_throttling(smu);
+		drm_dev_exit(idx);
+	}
 }
 
 static void smu_interrupt_work_fn(struct work_struct *work)
 {
 	struct smu_context *smu = container_of(work, struct smu_context,
 					       interrupt_work);
+	int idx;
 
-	mutex_lock(&smu->mutex);
+	if (drm_dev_enter(&smu->adev->ddev, &idx)) {
 
-	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
-		smu->ppt_funcs->interrupt_work(smu);
+		mutex_lock(&smu->mutex);
 
-	mutex_unlock(&smu->mutex);
+		if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
+			smu->ppt_funcs->interrupt_work(smu);
+
+		mutex_unlock(&smu->mutex);
+
+		drm_dev_exit(idx);
+
+	}
 }
 
 static int smu_sw_init(void *handle)
--
2.25.1