[PATCH 09/14] drm/xe: Convert hwmon from mem_access to xe_pm_runtime calls
Rodrigo Vivi
rodrigo.vivi at intel.com
Thu Feb 22 16:39:32 UTC 2024
Continue the work to kill mem_access in favor of pure runtime pm.
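
For reference, the resulting calling pattern in the converted hwmon paths is
sketched below. This is only an illustrative summary of the hunks in this
patch, using the helpers and lock already present in xe_hwmon.c; the function
name xe_hwmon_example_access() is made up and the register access itself is
elided:

	static void xe_hwmon_example_access(struct xe_hwmon *hwmon)
	{
		/* Wake the device via runtime pm instead of a mem_access ref */
		xe_pm_runtime_get(gt_to_xe(hwmon->gt));

		mutex_lock(&hwmon->hwmon_lock);
		/* ... read/write the hwmon registers here ... */
		mutex_unlock(&hwmon->hwmon_lock);

		/* Drop the runtime pm reference once the access is done */
		xe_pm_runtime_put(gt_to_xe(hwmon->gt));
	}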
Reviewed-by: Matthew Auld <matthew.auld at intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
---
drivers/gpu/drm/xe/xe_hwmon.c | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index b82233a41606..a256af8c2012 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -18,6 +18,7 @@
#include "xe_pcode.h"
#include "xe_pcode_api.h"
#include "xe_sriov.h"
+#include "xe_pm.h"
enum xe_hwmon_reg {
REG_PKG_RAPL_LIMIT,
@@ -266,7 +267,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
mutex_lock(&hwmon->hwmon_lock);
@@ -275,7 +276,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
mutex_unlock(&hwmon->hwmon_lock);
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
@@ -354,7 +355,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
mutex_lock(&hwmon->hwmon_lock);
@@ -363,7 +364,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
mutex_unlock(&hwmon->hwmon_lock);
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return count;
}
@@ -384,12 +385,12 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
if (attr == &sensor_dev_attr_power1_max_interval.dev_attr.attr)
ret = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT) ? attr->mode : 0;
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -610,7 +611,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
@@ -630,7 +631,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -642,7 +643,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
@@ -662,7 +663,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
@@ -674,7 +675,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_device_mem_access_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(gt_to_xe(hwmon->gt));
switch (type) {
case hwmon_power:
@@ -688,7 +689,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_device_mem_access_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(gt_to_xe(hwmon->gt));
return ret;
}
--
2.43.2