[Intel-xe] [PATCH v2 1/3] drm/xe/hwmon: Wrap 64 bit reg accesses to xe_hwmon_process_reg

Nilawar, Badal badal.nilawar at intel.com
Thu Oct 26 08:00:56 UTC 2023



On 19-10-2023 11:34, Gupta, Anshuman wrote:
> 
> 
>> -----Original Message-----
>> From: Nilawar, Badal <badal.nilawar at intel.com>
>> Sent: Thursday, October 19, 2023 12:34 AM
>> To: intel-xe at lists.freedesktop.org
>> Cc: Gupta, Anshuman <anshuman.gupta at intel.com>; Dixit, Ashutosh
>> <ashutosh.dixit at intel.com>; andi.shyti at linux.intel.com; Tauro, Riana
>> <riana.tauro at intel.com>; Vivi, Rodrigo <rodrigo.vivi at intel.com>
>> Subject: [PATCH v2 1/3] drm/xe/hwmon: Wrap 64 bit reg accesses to
>> xe_hwmon_process_reg
>>
>> Maintain single wrapper for 32 bit and 64 bit reg accesses
> Missing full stop here.
> The patch should only do what is mentioned in the subject; converting
> the function to void should be a separate patch.
I think I will rename this as a refactor patch and add some details
about it in the commit message.
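
To make the intent concrete (just a sketch distilled from the diff below,
not new code), call sites pick the access width through the operation enum
instead of going through a separate 64 bit helper:

	u64 reg_val;

	/* 32 bit read through the common helper */
	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0);

	/* 64 bit read through the same helper, no xe_hwmon_process_reg_read64() needed */
	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0);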

Regards,
Badal
> With that,
> Reviewed-by: Anshuman Gupta <anshuman.gupta at intel.com>
>>
>> Cc: Anshuman Gupta <anshuman.gupta at intel.com>
>> Cc: Ashutosh Dixit <ashutosh.dixit at intel.com>
>> Signed-off-by: Badal Nilawar <badal.nilawar at intel.com>
>> ---
>>   drivers/gpu/drm/xe/xe_hwmon.c | 122 +++++++++++++---------------------
>>   1 file changed, 45 insertions(+), 77 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
>> index 9ac05994a967..08c7289723ae 100644
>> --- a/drivers/gpu/drm/xe/xe_hwmon.c
>> +++ b/drivers/gpu/drm/xe/xe_hwmon.c
>> @@ -26,9 +26,9 @@ enum xe_hwmon_reg {
>>   };
>>
>>   enum xe_hwmon_reg_operation {
>> -	REG_READ,
>> -	REG_WRITE,
>> -	REG_RMW,
>> +	REG_READ32,
>> +	REG_RMW32,
>> +	REG_READ64,
>>   };
>>
>>   /*
>> @@ -95,49 +95,34 @@ static u32 xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg)
>>   	return reg.raw;
>>   }
>>
>> -static int xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
>> -				enum xe_hwmon_reg_operation operation, u32 *value,
>> -				u32 clr, u32 set)
>> +static void xe_hwmon_process_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
>> +				 enum xe_hwmon_reg_operation operation, u64 *value,
>> +				 u32 clr, u32 set)
>>   {
>>   	struct xe_reg reg;
>>
>>   	reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>>
>>   	if (!reg.raw)
>> -		return -EOPNOTSUPP;
>> +		return;
>>
>>   	switch (operation) {
>> -	case REG_READ:
>> +	case REG_READ32:
>>   		*value = xe_mmio_read32(hwmon->gt, reg);
>> -		return 0;
>> -	case REG_WRITE:
>> -		xe_mmio_write32(hwmon->gt, reg, *value);
>> -		return 0;
>> -	case REG_RMW:
>> +		break;
>> +	case REG_RMW32:
>>   		*value = xe_mmio_rmw32(hwmon->gt, reg, clr, set);
>> -		return 0;
>> +		break;
>> +	case REG_READ64:
>> +		*value = xe_mmio_read64_2x32(hwmon->gt, reg);
>> +		break;
>>   	default:
>>   		drm_warn(&gt_to_xe(hwmon->gt)->drm, "Invalid xe hwmon reg operation: %d\n",
>>   			 operation);
>> -		return -EOPNOTSUPP;
>> +		break;
>>   	}
>>   }
>>
>> -static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>> -				       enum xe_hwmon_reg hwmon_reg, u64 *value)
>> -{
>> -	struct xe_reg reg;
>> -
>> -	reg.raw = xe_hwmon_get_reg(hwmon, hwmon_reg);
>> -
>> -	if (!reg.raw)
>> -		return -EOPNOTSUPP;
>> -
>> -	*value = xe_mmio_read64_2x32(hwmon->gt, reg);
>> -
>> -	return 0;
>> -}
>> -
>>   #define PL1_DISABLE 0
>>
>>   /*
>> @@ -146,42 +131,39 @@ static int xe_hwmon_process_reg_read64(struct xe_hwmon *hwmon,
>>    * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
>>    * clamped values when read.
>>    */
>> -static int xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>> +static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, long *value)
>>   {
>> -	u32 reg_val;
>> -	u64 reg_val64, min, max;
>> +	u64 reg_val, min, max;
>>
>> -	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val, 0, 0);
>> +	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val, 0, 0);
>>   	/* Check if PL1 limit is disabled */
>>   	if (!(reg_val & PKG_PWR_LIM_1_EN)) {
>>   		*value = PL1_DISABLE;
>> -		return 0;
>> +		return;
>>   	}
>>
>>   	reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
>>   	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
>>
>> -	xe_hwmon_process_reg_read64(hwmon, REG_PKG_POWER_SKU, &reg_val64);
>> -	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val64);
>> +	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ64, &reg_val, 0, 0);
>> +	min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
>>   	min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
>> -	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val64);
>> +	max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
>>   	max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
>>
>>   	if (min && max)
>>   		*value = clamp_t(u64, *value, min, max);
>> -
>> -	return 0;
>>   }
>>
>>   static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>>   {
>> -	u32 reg_val;
>> +	u64 reg_val;
>>
>>   	/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
>>   	if (value == PL1_DISABLE) {
>> -		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
>> +		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
>>   				     PKG_PWR_LIM_1_EN, 0);
>> -		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ, &reg_val,
>> +		xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_READ32, &reg_val,
>>   				     PKG_PWR_LIM_1_EN, 0);
>>
>>   		if (reg_val & PKG_PWR_LIM_1_EN)
>> @@ -192,21 +174,19 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, long value)
>>   	reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
>>   	reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
>>
>> -	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW, &reg_val,
>> +	xe_hwmon_process_reg(hwmon, REG_PKG_RAPL_LIMIT, REG_RMW32, &reg_val,
>>   			     PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
>>
>>   	return 0;
>>   }
>>
>> -static int xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
>> +static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, long *value)
>>   {
>> -	u32 reg_val;
>> +	u64 reg_val;
>>
>> -	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ, &reg_val, 0, 0);
>> +	xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU, REG_READ32, &reg_val, 0, 0);
>>   	reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
>>   	*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
>> -
>> -	return 0;
>>   }
>>
>>   /*
>> @@ -233,13 +213,11 @@ static void
>>   xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
>>   {
>>   	struct xe_hwmon_energy_info *ei = &hwmon->ei;
>> -	u32 reg_val;
>> -
>> -	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> +	u64 reg_val;
>>
>>   	mutex_lock(&hwmon->hwmon_lock);
>>
>> -	xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ,
>> +	xe_hwmon_process_reg(hwmon, REG_PKG_ENERGY_STATUS, REG_READ32,
>>   			     &reg_val, 0, 0);
>>
>>   	if (reg_val >= ei->reg_val_prev)
>> @@ -253,8 +231,6 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, long *energy)
>>   				  hwmon->scl_shift_energy);
>>
>>   	mutex_unlock(&hwmon->hwmon_lock);
>> -
>> -	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>>   }
>>
>>   static const struct hwmon_channel_info *hwmon_info[] = {
>> @@ -284,16 +260,14 @@ static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
>>   			      uval);
>>   }
>>
>> -static int xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
>> +static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, long *value)
>>   {
>> -	u32 reg_val;
>> +	u64 reg_val;
>>
>>   	xe_hwmon_process_reg(hwmon, REG_GT_PERF_STATUS,
>> -			     REG_READ, &reg_val, 0, 0);
>> +			     REG_READ32, &reg_val, 0, 0);
>>   	/* HW register value in units of 2.5 millivolt */
>>   	*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
>> -
>> -	return 0;
>>   }
>>
>>   static umode_t
>> @@ -322,9 +296,11 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int chan, long *val)
>>
>>   	switch (attr) {
>>   	case hwmon_power_max:
>> -		return xe_hwmon_power_max_read(hwmon, val);
>> +		xe_hwmon_power_max_read(hwmon, val);
>> +		return 0;
>>   	case hwmon_power_rated_max:
>> -		return xe_hwmon_power_rated_max_read(hwmon, val);
>> +		xe_hwmon_power_rated_max_read(hwmon, val);
>> +		return 0;
>>   	case hwmon_power_crit:
>>   		ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
>>   		if (ret)
>> @@ -418,21 +394,13 @@ xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr)
>>   static int
>>   xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, long *val)
>>   {
>> -	int ret;
>> -
>> -	xe_device_mem_access_get(gt_to_xe(hwmon->gt));
>> -
>>   	switch (attr) {
>>   	case hwmon_in_input:
>> -		ret = xe_hwmon_get_voltage(hwmon, val);
>> -		break;
>> +		xe_hwmon_get_voltage(hwmon, val);
>> +		return 0;
>>   	default:
>> -		ret = -EOPNOTSUPP;
>> +		return -EOPNOTSUPP;
>>   	}
>> -
>> -	xe_device_mem_access_put(gt_to_xe(hwmon->gt));
>> -
>> -	return ret;
>>   }
>>
>>   static umode_t
>> @@ -564,15 +532,15 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
>>   {
>>   	struct xe_hwmon *hwmon = xe->hwmon;
>>   	long energy;
>> -	u32 val_sku_unit = 0;
>> -	int ret;
>> +	u64 val_sku_unit = 0;
>>
>> -	ret = xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT, REG_READ, &val_sku_unit, 0, 0);
>>   	/*
>>   	 * The contents of register PKG_POWER_SKU_UNIT do not change,
>>   	 * so read it once and store the shift values.
>>   	 */
>> -	if (!ret) {
>> +	if (xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT)) {
>> +		xe_hwmon_process_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
>> +				     REG_READ32, &val_sku_unit, 0, 0);
>>   		hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
>>   		hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
>>   	}
>> --
>> 2.25.1
> 

