<div dir="auto">On Mon, Mar 17, 2025 at 11:04 AM Alex Deucher <<a href="mailto:alexdeucher@gmail.com" target="_blank" rel="noreferrer">alexdeucher@gmail.com</a>> wrote:<br>
><br>
> On Mon, Mar 17, 2025 at 2:38 AM Alexandre Demers<br>
> <alexandre.f.demers@gmail.com> wrote:
> ><br>
> > Signed-off-by: Alexandre Demers <alexandre.f.demers@gmail.com>
> > ---<br>
> >  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 338 +++++++++++----------<br>
> >  drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c |  36 +--<br>
> >  2 files changed, 190 insertions(+), 184 deletions(-)<br>
> ><br>
> > diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c<br>
> > index 975912f285d7..0f34aaf773b7 100644<br>
> > --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c<br>
> > +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c<br>
> > @@ -2209,7 +2209,7 @@ static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)<br>
> >         if (xclk == 0)<br>
> >                 return 0;<br>
> ><br>
> > -       cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;<br>
> > +       cac_window = RREG32(ixCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK;<br>
><br>
> This looks wrong.  The ix prefix means the register offset is an index<br>
> rather than an offset and not directly accessible.  If you are using<br>
> the ix registers they should be using an indirect register accessor.<br>
> Same comment on the other cases below.<br>
><br>
> Alex<br>
<br>
Hmmm, I misunderstood the meaning of the ix prefix and wrongly renamed the defines. I'll fix it.

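Just to check I have the direction right before respinning: for the registers that really are SMC indirect, the access itself needs to switch over as well, roughly like this (only a sketch, assuming the generic RREG32_SMC()/WREG32_SMC() helpers are the right indirect accessors to use here):

        /* indirect (ix) register: go through the SMC index/data accessor */
        reg = RREG32_SMC(ixCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;
        reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);
        WREG32_SMC(ixCG_CAC_CTRL, reg);

while registers that are plain MMIO keep RREG32()/WREG32() with their mm-prefixed offsets. I'll go through each access and check which space it belongs to before resending.

Alexandre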
><br>
> >         cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);<br>
> ><br>
> >         wintime = (cac_window_size * 100) / xclk;<br>
> > @@ -2505,19 +2505,19 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,<br>
> >         if (adev->pm.dpm.sq_ramping_threshold == 0)<br>
> >                 return -EINVAL;<br>
> ><br>
> > -       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))<br>
> > +       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT))<br>
> >                 enable_sq_ramping = false;<br>
> ><br>
> > -       if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))<br>
> > +       if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT))<br>
> >                 enable_sq_ramping = false;<br>
> ><br>
> > -       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))<br>
> > +       if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT))<br>
> >                 enable_sq_ramping = false;<br>
> ><br>
> > -       if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))<br>
> > +       if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT))<br>
> >                 enable_sq_ramping = false;<br>
> ><br>
> > -       if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))<br>
> > +       if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT))<br>
> >                 enable_sq_ramping = false;<br>
> ><br>
> >         for (i = 0; i < state->performance_level_count; i++) {<br>
> > @@ -2526,14 +2526,17 @@ static int si_populate_sq_ramping_values(struct amdgpu_device *adev,<br>
> ><br>
> >                 if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&<br>
> >                     enable_sq_ramping) {<br>
> > -                       sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);<br>
> > -                       sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);<br>
> > -                       sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);<br>
> > -                       sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);<br>
> > -                       sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);<br>
> > +                       sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;<br>
> > +                       sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT;<br>
> > +                       sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT;<br>
> > +                       sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT;<br>
> > +                       sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT;<br>
> >                 } else {<br>
> > -                       sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;<br>
> > -                       sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;<br>
> > +                       sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK |<br>
> > +                                                               SQ_POWER_THROTTLE__MIN_POWER_MASK;<br>
> > +                       sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |<br>
> > +                                                               SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |<br>
> > +                                                               SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;<br>
> >                 }<br>
> ><br>
> >                 smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);<br>
> > @@ -2777,9 +2780,9 @@ static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)<br>
> >         if (!cac_tables)<br>
> >                 return -ENOMEM;<br>
> ><br>
> > -       reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;<br>
> > -       reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);<br>
> > -       WREG32(CG_CAC_CTRL, reg);<br>
> > +       reg = RREG32(ixCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;<br>
> > +       reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);<br>
> > +       WREG32(ixCG_CAC_CTRL, reg);<br>
> ><br>
> >         si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;<br>
> >         si_pi->dyn_powertune_data.dc_pwr_value =<br>
> > @@ -2978,10 +2981,10 @@ static int si_init_smc_spll_table(struct amdgpu_device *adev)<br>
> >                 ret = si_calculate_sclk_params(adev, sclk, &sclk_params);<br>
> >                 if (ret)<br>
> >                         break;<br>
> > -               p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;<br>
> > -               fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;<br>
> > -               clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;<br>
> > -               clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;<br>
> > +               p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;<br>
> > +               fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;<br>
> > +               clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;<br>
> > +               clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;<br>
> ><br>
> >                 fb_div &= ~0x00001FFF;<br>
> >                 fb_div >>= 1;<br>
> > @@ -3685,10 +3688,10 @@ static bool si_is_special_1gb_platform(struct amdgpu_device *adev)<br>
> >         WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);<br>
> >         width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;<br>
> ><br>
> > -       tmp = RREG32(MC_ARB_RAMCFG);<br>
> > -       row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;<br>
> > -       column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;<br>
> > -       bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;<br>
> > +       tmp = RREG32(mmMC_ARB_RAMCFG);<br>
> > +       row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10;<br>
> > +       column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8;<br>
> > +       bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2;<br>
> ><br>
> >         density = (1 << (row + column - 20 + bank)) * width;<br>
> ><br>
> > @@ -3772,11 +3775,11 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)<br>
> >         }<br>
> ><br>
> >         if (want_thermal_protection) {<br>
> > -               WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);<br>
> > +               WREG32_P(ixCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK);<br>
> >                 if (pi->thermal_protection)<br>
> > -                       WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);<br>
> > +                       WREG32_P(ixGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);<br>
> >         } else {<br>
> > -               WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);<br>
> > +               WREG32_P(ixGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);<br>
> >         }<br>
> >  }<br>
> ><br>
> > @@ -3801,20 +3804,20 @@ static void si_enable_auto_throttle_source(struct amdgpu_device *adev,<br>
> ><br>
> >  static void si_start_dpm(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);<br>
> > +       WREG32_P(ixGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);<br>
> >  }<br>
> ><br>
> >  static void si_stop_dpm(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);<br>
> > +       WREG32_P(ixGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);<br>
> >  }<br>
> ><br>
> >  static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)<br>
> >  {<br>
> >         if (enable)<br>
> > -               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);<br>
> > +               WREG32_P(ixSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);<br>
> >         else<br>
> > -               WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);<br>
> > +               WREG32_P(ixSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);<br>
> ><br>
> >  }<br>
> ><br>
> > @@ -3854,7 +3857,7 @@ static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power<br>
> >  static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,<br>
> >                                                       PPSMC_Msg msg, u32 parameter)<br>
> >  {<br>
> > -       WREG32(SMC_SCRATCH0, parameter);<br>
> > +       WREG32(ixSMC_SCRATCH0, parameter);<br>
> >         return amdgpu_si_send_msg_to_smc(adev, msg);<br>
> >  }<br>
> ><br>
> > @@ -4039,12 +4042,12 @@ static void si_read_clock_registers(struct amdgpu_device *adev)<br>
> >  {<br>
> >         struct si_power_info *si_pi = si_get_pi(adev);<br>
> ><br>
> > -       si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);<br>
> > -       si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);<br>
> > -       si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);<br>
> > -       si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);<br>
> > -       si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);<br>
> > -       si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);<br>
> > +       si_pi->clock_registers.cg_spll_func_cntl = RREG32(ixCG_SPLL_FUNC_CNTL);<br>
> > +       si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(ixCG_SPLL_FUNC_CNTL_2);<br>
> > +       si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(ixCG_SPLL_FUNC_CNTL_3);<br>
> > +       si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(ixCG_SPLL_FUNC_CNTL_4);<br>
> > +       si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(ixCG_SPLL_SPREAD_SPECTRUM);<br>
> > +       si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(ixCG_SPLL_SPREAD_SPECTRUM_2);<br>
> >         si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);<br>
> >         si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);<br>
> >         si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);<br>
> > @@ -4060,14 +4063,14 @@ static void si_enable_thermal_protection(struct amdgpu_device *adev,<br>
> >                                           bool enable)<br>
> >  {<br>
> >         if (enable)<br>
> > -               WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);<br>
> > +               WREG32_P(ixGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);<br>
> >         else<br>
> > -               WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);<br>
> > +               WREG32_P(ixGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);<br>
> >  }<br>
> ><br>
> >  static void si_enable_acpi_power_management(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);<br>
> > +       WREG32_P(ixGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK);<br>
> >  }<br>
> ><br>
> >  #if 0<br>
> > @@ -4148,9 +4151,9 @@ static void si_program_ds_registers(struct amdgpu_device *adev)<br>
> >                 tmp = 0x1;<br>
> ><br>
> >         if (eg_pi->sclk_deep_sleep) {<br>
> > -               WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);<br>
> > -               WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,<br>
> > -                        ~AUTOSCALE_ON_SS_CLEAR);<br>
> > +               WREG32_P(ixMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK);<br>
> > +               WREG32_P(ixCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK,<br>
> > +                        ~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK);<br>
> >         }<br>
> >  }<br>
> ><br>
> > @@ -4159,18 +4162,18 @@ static void si_program_display_gap(struct amdgpu_device *adev)<br>
> >         u32 tmp, pipe;<br>
> >         int i;<br>
> ><br>
> > -       tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);<br>
> > +       tmp = RREG32(ixCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);<br>
> >         if (adev->pm.dpm.new_active_crtc_count > 0)<br>
> > -               tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);<br>
> > +               tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;<br>
> >         else<br>
> > -               tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);<br>
> > +               tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;<br>
> ><br>
> >         if (adev->pm.dpm.new_active_crtc_count > 1)<br>
> > -               tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);<br>
> > +               tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;<br>
> >         else<br>
> > -               tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);<br>
> > +               tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;<br>
> ><br>
> > -       WREG32(CG_DISPLAY_GAP_CNTL, tmp);<br>
> > +       WREG32(ixCG_DISPLAY_GAP_CNTL, tmp);<br>
> ><br>
> >         tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);<br>
> >         pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;<br>
> > @@ -4205,10 +4208,10 @@ static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)<br>
> ><br>
> >         if (enable) {<br>
> >                 if (pi->sclk_ss)<br>
> > -                       WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);<br>
> > +                       WREG32_P(ixGENERAL_PWRMGT, GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);<br>
> >         } else {<br>
> > -               WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);<br>
> > -               WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);<br>
> > +               WREG32_P(ixCG_SPLL_SPREAD_SPECTRUM, 0, ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);<br>
> > +               WREG32_P(ixGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);<br>
> >         }<br>
> >  }<br>
> ><br>
> > @@ -4230,15 +4233,15 @@ static void si_setup_bsp(struct amdgpu_device *adev)<br>
> >                                &pi->pbsu);<br>
> ><br>
> ><br>
> > -        pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);<br>
> > -       pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);<br>
> > +        pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT);<br>
> > +       pi->psp = (pi->pbsp << CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT);<br>
> ><br>
> > -       WREG32(CG_BSP, pi->dsp);<br>
> > +       WREG32(ixCG_BSP, pi->dsp);<br>
> >  }<br>
> ><br>
> >  static void si_program_git(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);<br>
> > +       WREG32_P(ixCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK);<br>
> >  }<br>
> ><br>
> >  static void si_program_tp(struct amdgpu_device *adev)<br>
> > @@ -4247,54 +4250,54 @@ static void si_program_tp(struct amdgpu_device *adev)<br>
> >         enum r600_td td = R600_TD_DFLT;<br>
> ><br>
> >         for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)<br>
> > -               WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));<br>
> > +               WREG32(ixCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT));<br>
> ><br>
> >         if (td == R600_TD_AUTO)<br>
> > -               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);<br>
> > +               WREG32_P(ixSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);<br>
> >         else<br>
> > -               WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);<br>
> > +               WREG32_P(ixSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);<br>
> ><br>
> >         if (td == R600_TD_UP)<br>
> > -               WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);<br>
> > +               WREG32_P(ixSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);<br>
> ><br>
> >         if (td == R600_TD_DOWN)<br>
> > -               WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);<br>
> > +               WREG32_P(ixSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);<br>
> >  }<br>
> ><br>
> >  static void si_program_tpp(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       WREG32(CG_TPC, R600_TPC_DFLT);<br>
> > +       WREG32(ixCG_TPC, R600_TPC_DFLT);<br>
> >  }<br>
> ><br>
> >  static void si_program_sstp(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));<br>
> > +       WREG32(ixCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT| R600_SST_DFLT << CG_SSP__SST__SHIFT));<br>
> >  }<br>
> ><br>
> >  static void si_enable_display_gap(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);<br>
> > +       u32 tmp = RREG32(ixCG_DISPLAY_GAP_CNTL);<br>
> ><br>
> > -       tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);<br>
> > -       tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |<br>
> > -               DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));<br>
> > +       tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);<br>
> > +       tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT |<br>
> > +               R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT);<br>
> ><br>
> > -       tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);<br>
> > -       tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |<br>
> > -               DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));<br>
> > -       WREG32(CG_DISPLAY_GAP_CNTL, tmp);<br>
> > +       tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK);<br>
> > +       tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT |<br>
> > +               R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT);<br>
> > +       WREG32(ixCG_DISPLAY_GAP_CNTL, tmp);<br>
> >  }<br>
> ><br>
> >  static void si_program_vc(struct amdgpu_device *adev)<br>
> >  {<br>
> >         struct rv7xx_power_info *pi = rv770_get_pi(adev);<br>
> ><br>
> > -       WREG32(CG_FTV, pi->vrc);<br>
> > +       WREG32(ixCG_FTV, pi->vrc);<br>
> >  }<br>
> ><br>
> >  static void si_clear_vc(struct amdgpu_device *adev)<br>
> >  {<br>
> > -       WREG32(CG_FTV, 0);<br>
> > +       WREG32(ixCG_FTV, 0);<br>
> >  }<br>
> ><br>
> >  static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)<br>
> > @@ -4751,7 +4754,7 @@ static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,<br>
> >         u32 dram_rows;<br>
> >         u32 dram_refresh_rate;<br>
> >         u32 mc_arb_rfsh_rate;<br>
> > -       u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;<br>
> > +       u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT;<br>
> ><br>
> >         if (tmp >= 4)<br>
> >                 dram_rows = 16384;<br>
> > @@ -4925,7 +4928,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,<br>
> ><br>
> >         si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);<br>
> ><br>
> > -       reg = CG_R(0xffff) | CG_L(0);<br>
> > +       reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;<br>
> >         table->initialState.level.aT = cpu_to_be32(reg);
> >         table->initialState.level.bSP = cpu_to_be32(pi->dsp);<br>
> >         table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;<br>
> > @@ -4951,10 +4954,13 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,<br>
> >         table->initialState.level.dpm2.BelowSafeInc = 0;<br>
> >         table->initialState.level.dpm2.PwrEfficiencyRatio = 0;<br>
> ><br>
> > -       reg = MIN_POWER_MASK | MAX_POWER_MASK;<br>
> > +       reg = SQ_POWER_THROTTLE__MIN_POWER_MASK |<br>
> > +               SQ_POWER_THROTTLE__MAX_POWER_MASK;<br>
> >         table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);<br>
> ><br>
> > -       reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;<br>
> > +       reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |<br>
> > +               SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |<br>
> > +               SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;<br>
> >         table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);<br>
> ><br>
> >         return 0;<br>
> > @@ -5073,8 +5079,8 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,<br>
> ><br>
> >         dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);<br>
> ><br>
> > -       spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;<br>
> > -       spll_func_cntl_2 |= SCLK_MUX_SEL(4);<br>
> > +       spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;<br>
> > +       spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;<br>
> ><br>
> >         table->ACPIState.level.mclk.vDLL_CNTL =<br>
> >                 cpu_to_be32(dll_cntl);<br>
> > @@ -5118,10 +5124,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,<br>
> >         table->ACPIState.level.dpm2.BelowSafeInc = 0;<br>
> >         table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;<br>
> ><br>
> > -       reg = MIN_POWER_MASK | MAX_POWER_MASK;<br>
> > +       reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK;<br>
> >         table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);<br>
> ><br>
> > -       reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;<br>
> > +       reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;<br>
> >         table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);<br>
> ><br>
> >         return 0;<br>
> > @@ -5266,8 +5272,8 @@ static int si_init_smc_table(struct amdgpu_device *adev)<br>
> >                 if (ret)<br>
> >                         return ret;<br>
> ><br>
> > -               WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);<br>
> > -               WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);<br>
> > +               WREG32(ixCG_ULV_CONTROL, ulv->cg_ulv_control);<br>
> > +               WREG32(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);<br>
> ><br>
> >                 lane_width = amdgpu_get_pcie_lanes(adev);<br>
> >                 si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);<br>
> > @@ -5310,16 +5316,16 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,<br>
> >         do_div(tmp, reference_clock);<br>
> >         fbdiv = (u32) tmp;<br>
> ><br>
> > -       spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);<br>
> > -       spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);<br>
> > -       spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);<br>
> > +       spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK);<br>
> > +       spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT;<br>
> > +       spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;<br>
> ><br>
> > -       spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;<br>
> > -       spll_func_cntl_2 |= SCLK_MUX_SEL(2);<br>
> > +       spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;<br>
> > +       spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;<br>
> ><br>
> > -       spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;<br>
> > -       spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);<br>
> > -       spll_func_cntl_3 |= SPLL_DITHEN;<br>
> > +       spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;<br>
> > +       spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;<br>
> > +       spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;<br>
> ><br>
> >         if (pi->sclk_ss) {<br>
> >                 struct amdgpu_atom_ss ss;<br>
> > @@ -5330,12 +5336,12 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,<br>
> >                         u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);<br>
> >                         u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);<br>
> ><br>
> > -                       cg_spll_spread_spectrum &= ~CLK_S_MASK;<br>
> > -                       cg_spll_spread_spectrum |= CLK_S(clk_s);<br>
> > -                       cg_spll_spread_spectrum |= SSEN;<br>
> > +                       cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK;<br>
> > +                       cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;<br>
> > +                       cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;<br>
> ><br>
> > -                       cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;<br>
> > -                       cg_spll_spread_spectrum_2 |= CLK_V(clk_v);<br>
> > +                       cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK;<br>
> > +                       cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;<br>
> >                 }<br>
> >         }<br>
> ><br>
> > @@ -5501,7 +5507,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,<br>
> >         if (pi->mclk_stutter_mode_threshold &&<br>
> >             (pl->mclk <= pi->mclk_stutter_mode_threshold) &&<br>
> >             !eg_pi->uvd_enabled &&<br>
> > -           (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&<br>
> > +           (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&<br>
> >             (adev->pm.dpm.new_active_crtc_count <= 2)) {<br>
> >                 level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;<br>
> >         }<br>
> > @@ -5595,7 +5601,7 @@ static int si_populate_smc_t(struct amdgpu_device *adev,<br>
> >                 return -EINVAL;<br>
> ><br>
> >         if (state->performance_level_count < 2) {<br>
> > -               a_t = CG_R(0xffff) | CG_L(0);<br>
> > +               a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;<br>
> >                 smc_state->levels[0].aT = cpu_to_be32(a_t);<br>
> >                 return 0;<br>
> >         }<br>
> > @@ -5616,13 +5622,13 @@ static int si_populate_smc_t(struct amdgpu_device *adev,<br>
> >                         t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;<br>
> >                 }<br>
> ><br>
> > -               a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;<br>
> > -               a_t |= CG_R(t_l * pi->bsp / 20000);<br>
> > +               a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK;<br>
> > +               a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT;<br>
> >                 smc_state->levels[i].aT = cpu_to_be32(a_t);<br>
> ><br>
> >                 high_bsp = (i == state->performance_level_count - 2) ?<br>
> >                         pi->pbsp : pi->bsp;<br>
> > -               a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);<br>
> > +               a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT;<br>
> >                 smc_state->levels[i + 1].aT = cpu_to_be32(a_t);<br>
> >         }<br>
> ><br>
> > @@ -6196,9 +6202,9 @@ static int si_upload_mc_reg_table(struct amdgpu_device *adev,<br>
> >  static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)<br>
> >  {<br>
> >         if (enable)<br>
> > -               WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);<br>
> > +               WREG32_P(ixGENERAL_PWRMGT, GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);<br>
> >         else<br>
> > -               WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);<br>
> > +               WREG32_P(ixGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);<br>
> >  }<br>
> ><br>
> >  static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,<br>
> > @@ -6220,8 +6226,8 @@ static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)<br>
> >  {<br>
> >         u32 speed_cntl;<br>
> ><br>
> > -       speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;<br>
> > -       speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;<br>
> > +       speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;<br>
> > +       speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;<br>
> ><br>
> >         return (u16)speed_cntl;<br>
> >  }<br>
> > @@ -6428,21 +6434,21 @@ static void si_dpm_setup_asic(struct amdgpu_device *adev)<br>
> >  static int si_thermal_enable_alert(struct amdgpu_device *adev,<br>
> >                                    bool enable)<br>
> >  {<br>
> > -       u32 thermal_int = RREG32(CG_THERMAL_INT);<br>
> > +       u32 thermal_int = RREG32(ixCG_THERMAL_INT);<br>
> ><br>
> >         if (enable) {<br>
> >                 PPSMC_Result result;<br>
> ><br>
> > -               thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);<br>
> > -               WREG32(CG_THERMAL_INT, thermal_int);<br>
> > +               thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK);<br>
> > +               WREG32(ixCG_THERMAL_INT, thermal_int);<br>
> >                 result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);<br>
> >                 if (result != PPSMC_Result_OK) {<br>
> >                         DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");<br>
> >                         return -EINVAL;<br>
> >                 }<br>
> >         } else {<br>
> > -               thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;<br>
> > -               WREG32(CG_THERMAL_INT, thermal_int);<br>
> > +               thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;<br>
> > +               WREG32(ixCG_THERMAL_INT, thermal_int);<br>
> >         }<br>
> ><br>
> >         return 0;<br>
> > @@ -6463,9 +6469,9 @@ static int si_thermal_set_temperature_range(struct amdgpu_device *adev,<br>
> >                 return -EINVAL;<br>
> >         }<br>
> ><br>
> > -       WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);<br>
> > -       WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);<br>
> > -       WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);<br>
> > +       WREG32_P(ixCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK);<br>
> > +       WREG32_P(ixCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK);<br>
> > +       WREG32_P(ixCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK);<br>
> ><br>
> >         adev->pm.dpm.thermal.min_temp = low_temp;<br>
> >         adev->pm.dpm.thermal.max_temp = high_temp;<br>
> > @@ -6479,20 +6485,20 @@ static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)<br>
> >         u32 tmp;<br>
> ><br>
> >         if (si_pi->fan_ctrl_is_in_default_mode) {<br>
> > -               tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;<br>
> > +               tmp = (RREG32(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;<br>
> >                 si_pi->fan_ctrl_default_mode = tmp;<br>
> > -               tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;<br>
> > +               tmp = (RREG32(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT;<br>
> >                 si_pi->t_min = tmp;<br>
> >                 si_pi->fan_ctrl_is_in_default_mode = false;<br>
> >         }<br>
> ><br>
> > -       tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;<br>
> > -       tmp |= TMIN(0);<br>
> > -       WREG32(CG_FDO_CTRL2, tmp);<br>
> > +       tmp = RREG32(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;<br>
> > +       tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;<br>
> > +       WREG32(ixCG_FDO_CTRL2, tmp);<br>
> ><br>
> > -       tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;<br>
> > -       tmp |= FDO_PWM_MODE(mode);<br>
> > -       WREG32(CG_FDO_CTRL2, tmp);<br>
> > +       tmp = RREG32(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;<br>
> > +       tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;<br>
> > +       WREG32(ixCG_FDO_CTRL2, tmp);<br>
> >  }<br>
> ><br>
> >  static int si_thermal_setup_fan_table(struct amdgpu_device *adev)<br>
> > @@ -6511,7 +6517,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)<br>
> >                 return 0;<br>
> >         }<br>
> ><br>
> > -       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;<br>
> > +       duty100 = (RREG32(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;<br>
> ><br>
> >         if (duty100 == 0) {<br>
> >                 adev->pm.dpm.fan.ucode_fan_control = false;<br>
> > @@ -6547,7 +6553,7 @@ static int si_thermal_setup_fan_table(struct amdgpu_device *adev)<br>
> >                                                 reference_clock) / 1600);<br>
> >         fan_table.fdo_max = cpu_to_be16((u16)duty100);<br>
> ><br>
> > -       tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;<br>
> > +       tmp = (RREG32(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;<br>
> >         fan_table.temp_src = (uint8_t)tmp;<br>
> ><br>
> >         ret = amdgpu_si_copy_bytes_to_smc(adev,<br>
> > @@ -6606,8 +6612,8 @@ static int si_dpm_get_fan_speed_pwm(void *handle,<br>
> >         if (adev->pm.no_fan)<br>
> >                 return -ENOENT;<br>
> ><br>
> > -       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;<br>
> > -       duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;<br>
> > +       duty100 = (RREG32(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;<br>
> > +       duty = (RREG32(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;<br>
> ><br>
> >         if (duty100 == 0)<br>
> >                 return -EINVAL;<br>
> > @@ -6637,7 +6643,7 @@ static int si_dpm_set_fan_speed_pwm(void *handle,<br>
> >         if (speed > 255)<br>
> >                 return -EINVAL;<br>
> ><br>
> > -       duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;<br>
> > +       duty100 = (RREG32(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;<br>
> ><br>
> >         if (duty100 == 0)<br>
> >                 return -EINVAL;<br>
> > @@ -6646,9 +6652,9 @@ static int si_dpm_set_fan_speed_pwm(void *handle,<br>
> >         do_div(tmp64, 255);<br>
> >         duty = (u32)tmp64;<br>
> ><br>
> > -       tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;<br>
> > -       tmp |= FDO_STATIC_DUTY(duty);<br>
> > -       WREG32(CG_FDO_CTRL0, tmp);<br>
> > +       tmp = RREG32(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;<br>
> > +       tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;<br>
> > +       WREG32(ixCG_FDO_CTRL0, tmp);<br>
> ><br>
> >         return 0;<br>
> >  }<br>
> > @@ -6688,8 +6694,8 @@ static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)<br>
> >         if (si_pi->fan_is_controlled_by_smc)<br>
> >                 return 0;<br>
> ><br>
> > -       tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;<br>
> > -       *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);<br>
> > +       tmp = RREG32(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;<br>
> > +       *fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);<br>
> ><br>
> >         return 0;<br>
> >  }<br>
> > @@ -6707,7 +6713,7 @@ static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,<br>
> >         if (adev->pm.fan_pulses_per_revolution == 0)<br>
> >                 return -ENOENT;<br>
> ><br>
> > -       tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;<br>
> > +       tach_period = (RREG32(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;<br>
> >         if (tach_period == 0)<br>
> >                 return -ENOENT;<br>
> ><br>
> > @@ -6736,9 +6742,9 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,<br>
> >                 si_fan_ctrl_stop_smc_fan_control(adev);<br>
> ><br>
> >         tach_period = 60 * xclk * 10000 / (8 * speed);<br>
> > -       tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;<br>
> > -       tmp |= TARGET_PERIOD(tach_period);<br>
> > -       WREG32(CG_TACH_CTRL, tmp);<br>
> > +       tmp = RREG32(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;<br>
> > +       tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;<br>
> > +       WREG32(ixCG_TACH_CTRL, tmp);<br>
> ><br>
> >         si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);<br>
> ><br>
> > @@ -6752,13 +6758,13 @@ static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)<br>
> >         u32 tmp;<br>
> ><br>
> >         if (!si_pi->fan_ctrl_is_in_default_mode) {<br>
> > -               tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;<br>
> > -               tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);<br>
> > -               WREG32(CG_FDO_CTRL2, tmp);<br>
> > +               tmp = RREG32(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;<br>
> > +               tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;<br>
> > +               WREG32(ixCG_FDO_CTRL2, tmp);<br>
> ><br>
> > -               tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;<br>
> > -               tmp |= TMIN(si_pi->t_min);<br>
> > -               WREG32(CG_FDO_CTRL2, tmp);<br>
> > +               tmp = RREG32(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;<br>
> > +               tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;<br>
> > +               WREG32(ixCG_FDO_CTRL2, tmp);<br>
> >                 si_pi->fan_ctrl_is_in_default_mode = true;<br>
> >         }<br>
> >  }<br>
> > @@ -6776,14 +6782,14 @@ static void si_thermal_initialize(struct amdgpu_device *adev)<br>
> >         u32 tmp;<br>
> ><br>
> >         if (adev->pm.fan_pulses_per_revolution) {<br>
> > -               tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;<br>
> > -               tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);<br>
> > -               WREG32(CG_TACH_CTRL, tmp);<br>
> > +               tmp = RREG32(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;<br>
> > +               tmp |= (adev->pm.fan_pulses_per_revolution -1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;<br>
> > +               WREG32(ixCG_TACH_CTRL, tmp);<br>
> >         }<br>
> ><br>
> > -       tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;<br>
> > -       tmp |= TACH_PWM_RESP_RATE(0x28);<br>
> > -       WREG32(CG_FDO_CTRL2, tmp);<br>
> > +       tmp = RREG32(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;<br>
> > +       tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;<br>
> > +       WREG32(ixCG_FDO_CTRL2, tmp);<br>
> >  }<br>
> ><br>
> >  static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)<br>
> > @@ -7546,8 +7552,8 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,<br>
> >         struct  si_ps *ps = si_get_ps(rps);<br>
> >         struct rv7xx_pl *pl;<br>
> >         u32 current_index =<br>
> > -               (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >><br>
> > -               CURRENT_STATE_INDEX_SHIFT;<br>
> > +               (RREG32(ixTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >><br>
> > +                       TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;<br>
> ><br>
> >         if (current_index >= ps->performance_level_count) {<br>
> >                 seq_printf(m, "invalid dpm profile %d\n", current_index);<br>
> > @@ -7570,14 +7576,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,<br>
> >         case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:<br>
> >                 switch (state) {<br>
> >                 case AMDGPU_IRQ_STATE_DISABLE:<br>
> > -                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);<br>
> > -                       cg_thermal_int |= THERM_INT_MASK_HIGH;<br>
> > -                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);<br>
> > +                       cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);<br>
> > +                       cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;<br>
> > +                       WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);<br>
> >                         break;<br>
> >                 case AMDGPU_IRQ_STATE_ENABLE:<br>
> > -                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);<br>
> > -                       cg_thermal_int &= ~THERM_INT_MASK_HIGH;<br>
> > -                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);<br>
> > +                       cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);<br>
> > +                       cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;<br>
> > +                       WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);<br>
> >                         break;<br>
> >                 default:<br>
> >                         break;<br>
> > @@ -7587,14 +7593,14 @@ static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,<br>
> >         case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:<br>
> >                 switch (state) {<br>
> >                 case AMDGPU_IRQ_STATE_DISABLE:<br>
> > -                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);<br>
> > -                       cg_thermal_int |= THERM_INT_MASK_LOW;<br>
> > -                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);<br>
> > +                       cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);<br>
> > +                       cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;<br>
> > +                       WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);<br>
> >                         break;<br>
> >                 case AMDGPU_IRQ_STATE_ENABLE:<br>
> > -                       cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);<br>
> > -                       cg_thermal_int &= ~THERM_INT_MASK_LOW;<br>
> > -                       WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);<br>
> > +                       cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);<br>
> > +                       cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;<br>
> > +                       WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);<br>
> >                         break;<br>
> >                 default:<br>
> >                         break;<br>
> > @@ -7884,8 +7890,8 @@ static int si_dpm_get_temp(void *handle)<br>
> >         int actual_temp = 0;<br>
> >         struct amdgpu_device *adev = (struct amdgpu_device *)handle;<br>
> ><br>
> > -       temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >><br>
> > -               CTF_TEMP_SHIFT;<br>
> > +       temp = (RREG32(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >><br>
> > +               CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;<br>
> ><br>
> >         if (temp & 0x200)<br>
> >                 actual_temp = 255;<br>
> > @@ -8015,8 +8021,8 @@ static int si_dpm_read_sensor(void *handle, int idx,<br>
> >         struct  si_ps *ps = si_get_ps(rps);<br>
> >         uint32_t sclk, mclk;<br>
> >         u32 pl_index =<br>
> > -               (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >><br>
> > -               CURRENT_STATE_INDEX_SHIFT;<br>
> > +               (RREG32(ixTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >><br>
> > +               TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;<br>
> ><br>
> >         /* size must be at least 4 bytes for all sensors */<br>
> >         if (*size < 4)<br>
> > diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c<br>
> > index c712899c44ca..4e65ab9e931c 100644<br>
> > --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c<br>
> > +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c<br>
> > @@ -44,8 +44,8 @@ static int si_set_smc_sram_address(struct amdgpu_device *adev,<br>
> >         if ((smc_address + 3) > limit)<br>
> >                 return -EINVAL;<br>
> ><br>
> > -       WREG32(SMC_IND_INDEX_0, smc_address);<br>
> > -       WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);<br>
> > +       WREG32(mmSMC_IND_INDEX_0, smc_address);<br>
> > +       WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);<br>
> ><br>
> >         return 0;<br>
> >  }<br>
> > @@ -74,7 +74,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,<br>
> >                 if (ret)<br>
> >                         goto done;<br>
> ><br>
> > -               WREG32(SMC_IND_DATA_0, data);<br>
> > +               WREG32(mmSMC_IND_DATA_0, data);<br>
> ><br>
> >                 src += 4;<br>
> >                 byte_count -= 4;<br>
> > @@ -89,7 +89,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,<br>
> >                 if (ret)<br>
> >                         goto done;<br>
> ><br>
> > -               original_data = RREG32(SMC_IND_DATA_0);<br>
> > +               original_data = RREG32(mmSMC_IND_DATA_0);<br>
> >                 extra_shift = 8 * (4 - byte_count);<br>
> ><br>
> >                 while (byte_count > 0) {<br>
> > @@ -105,7 +105,7 @@ int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev,<br>
> >                 if (ret)<br>
> >                         goto done;<br>
> ><br>
> > -               WREG32(SMC_IND_DATA_0, data);<br>
> > +               WREG32(mmSMC_IND_DATA_0, data);<br>
> >         }<br>
> ><br>
> >  done:<br>
> > @@ -127,10 +127,10 @@ void amdgpu_si_reset_smc(struct amdgpu_device *adev)<br>
> >  {<br>
> >         u32 tmp;<br>
> ><br>
> > -       RREG32(CB_CGTT_SCLK_CTRL);<br>
> > -       RREG32(CB_CGTT_SCLK_CTRL);<br>
> > -       RREG32(CB_CGTT_SCLK_CTRL);<br>
> > -       RREG32(CB_CGTT_SCLK_CTRL);<br>
> > +       RREG32(mmCB_CGTT_SCLK_CTRL);<br>
> > +       RREG32(mmCB_CGTT_SCLK_CTRL);<br>
> > +       RREG32(mmCB_CGTT_SCLK_CTRL);<br>
> > +       RREG32(mmCB_CGTT_SCLK_CTRL);<br>
> ><br>
> >         tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) |<br>
> >               RST_REG;<br>
> > @@ -176,16 +176,16 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,<br>
> >         if (!amdgpu_si_is_smc_running(adev))<br>
> >                 return PPSMC_Result_Failed;<br>
> ><br>
> > -       WREG32(SMC_MESSAGE_0, msg);<br>
> > +       WREG32(mmSMC_MESSAGE_0, msg);<br>
> ><br>
> >         for (i = 0; i < adev->usec_timeout; i++) {<br>
> > -               tmp = RREG32(SMC_RESP_0);<br>
> > +               tmp = RREG32(mmSMC_RESP_0);<br>
> >                 if (tmp != 0)<br>
> >                         break;<br>
> >                 udelay(1);<br>
> >         }<br>
> ><br>
> > -       return (PPSMC_Result)RREG32(SMC_RESP_0);<br>
> > +       return (PPSMC_Result)RREG32(mmSMC_RESP_0);<br>
> >  }<br>
> ><br>
> >  PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)<br>
> > @@ -231,18 +231,18 @@ int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit)<br>
> >                 return -EINVAL;<br>
> ><br>
> >         spin_lock_irqsave(&adev->smc_idx_lock, flags);<br>
> > -       WREG32(SMC_IND_INDEX_0, ucode_start_address);<br>
> > -       WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);<br>
> > +       WREG32(mmSMC_IND_INDEX_0, ucode_start_address);<br>
> > +       WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);<br>
> >         while (ucode_size >= 4) {<br>
> >                 /* SMC address space is BE */<br>
> >                 data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];<br>
> ><br>
> > -               WREG32(SMC_IND_DATA_0, data);<br>
> > +               WREG32(mmSMC_IND_DATA_0, data);<br>
> ><br>
> >                 src += 4;<br>
> >                 ucode_size -= 4;<br>
> >         }<br>
> > -       WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);<br>
> > +       WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);<br>
> >         spin_unlock_irqrestore(&adev->smc_idx_lock, flags);<br>
> ><br>
> >         return 0;<br>
> > @@ -257,7 +257,7 @@ int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,<br>
> >         spin_lock_irqsave(&adev->smc_idx_lock, flags);<br>
> >         ret = si_set_smc_sram_address(adev, smc_address, limit);<br>
> >         if (ret == 0)<br>
> > -               *value = RREG32(SMC_IND_DATA_0);<br>
> > +               *value = RREG32(mmSMC_IND_DATA_0);<br>
> >         spin_unlock_irqrestore(&adev->smc_idx_lock, flags);<br>
> ><br>
> >         return ret;<br>
> > @@ -272,7 +272,7 @@ int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,<br>
> >         spin_lock_irqsave(&adev->smc_idx_lock, flags);<br>
> >         ret = si_set_smc_sram_address(adev, smc_address, limit);<br>
> >         if (ret == 0)<br>
> > -               WREG32(SMC_IND_DATA_0, value);<br>
> > +               WREG32(mmSMC_IND_DATA_0, value);<br>
> >         spin_unlock_irqrestore(&adev->smc_idx_lock, flags);<br>
> ><br>
> >         return ret;<br>
> > --<br>
> > 2.48.1<br>
> >