[PATCH 04/10] drm/i915/xe3lpd: Update pmdemand programming

Gustavo Sousa gustavo.sousa at intel.com
Wed Oct 9 13:53:08 UTC 2024


Quoting Govindapillai, Vinod (2024-10-09 10:09:45-03:00)
>Hi Matt,
>
>Probably you missed one change...
>
>On Tue, 2024-10-08 at 15:37 -0700, Matt Atwood wrote:
>> From: Matt Roper <matthew.d.roper at intel.com>
>> 
>> There are some minor changes to pmdemand handling on Xe3:
>>  - Active scalers are no longer tracked.  We can simply skip the readout
>>    and programming of this field.
>>  - Active dbuf slices are no longer tracked.  We should skip the readout
>>    and programming of this field and also make sure that it stays 0 in
>>    our software bookkeeping so that we won't erroneously return true
>>    from intel_pmdemand_needs_update() due to mismatches.
>>  - Even though there aren't enough pipes to utilize them, the size of
>>    the 'active pipes' field has expanded to four bits, taking over the
>>    register bits previously used for dbuf slices.  Since the lower bits
>>    of the mask have moved, we need to update our reads/writes to handle
>>    this properly.
>> 
>> Bspec: 68883, 69125
>> Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
>> Signed-off-by: Matt Atwood <matthew.s.atwood at intel.com>
>> ---
>>  drivers/gpu/drm/i915/display/intel_pmdemand.c | 61 +++++++++++++------
>>  drivers/gpu/drm/i915/display/intel_pmdemand.h |  4 +-
>>  drivers/gpu/drm/i915/i915_reg.h               |  1 +
>>  3 files changed, 45 insertions(+), 21 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
>> index ceaf9e3147da..9af2f83d3a75 100644
>> --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
>> +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
>> @@ -258,6 +258,7 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
>>  
>>  static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
>>  {
>> +       struct drm_i915_private *i915 = to_i915(state->base.dev);
>>         const struct intel_bw_state *new_bw_state, *old_bw_state;
>>         const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
>>         const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
>> @@ -274,12 +275,16 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
>>         new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
>>         old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
>>         if (new_dbuf_state &&
>> -           (new_dbuf_state->active_pipes !=
>> -            old_dbuf_state->active_pipes ||
>> -            new_dbuf_state->enabled_slices !=
>> -            old_dbuf_state->enabled_slices))
>> +           new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
>>                 return true;
>>  
>> +       if (DISPLAY_VER(i915) < 30) {
>> +               if (new_dbuf_state &&
>> +                   new_dbuf_state->enabled_slices !=
>> +                   old_dbuf_state->enabled_slices)
>> +                       return true;
>> +       }
>> +
>>         new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
>>         old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
>>         if (new_cdclk_state &&
>> @@ -329,8 +334,10 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
>>  
>>         new_pmdemand_state->params.active_pipes =
>>                 min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
>In xe3, the clamp could need to be 4 (11b for 3 pipes and 100b for 4 pipes).

Good catch!

In this case, we could either:

- Apply min_t() only for pre-xe3 and just use the value directly for
  xe3. Bspec mentions that Pcode should clamp to the maximum defined
  number of pipes.
- Define a local max_active_pipes variable, using 3 for pre-xe3 and the
  number of possible pipes for xe3. Then use that variable in min_t().

I would prefer the latter, which does what Pcode is also supposed to do.
This little redundancy doesn't hurt and makes things safer.
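
For illustration, the latter could look roughly like this (the
max_active_pipes name and the INTEL_NUM_PIPES() helper here are just
placeholders for whatever we end up using, so take it as a sketch
rather than the exact change):

	/* placeholder: any helper giving the number of possible pipes works */
	u8 max_active_pipes = DISPLAY_VER(i915) >= 30 ?
			      INTEL_NUM_PIPES(i915) : 3;

	new_pmdemand_state->params.active_pipes =
		min_t(u8, hweight8(new_dbuf_state->active_pipes),
		      max_active_pipes);

That way we keep the existing clamping behavior for pre-xe3 and still
mirror on xe3 what Pcode is expected to do anyway.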

--
Gustavo Sousa

>
>BR
>vinod
>
>> -       new_pmdemand_state->params.active_dbufs =
>> -               min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
>> +
>> +       if (DISPLAY_VER(i915) < 30)
>> +               new_pmdemand_state->params.active_dbufs =
>> +                       min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
>>  
>>         new_cdclk_state = intel_atomic_get_cdclk_state(state);
>>         if (IS_ERR(new_cdclk_state))
>> @@ -395,27 +402,32 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
>>  
>>         reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
>>  
>> -       /* Set 1*/
>>         pmdemand_state->params.qclk_gv_bw =
>>                 REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
>>         pmdemand_state->params.voltage_index =
>>                 REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
>>         pmdemand_state->params.qclk_gv_index =
>>                 REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
>> -       pmdemand_state->params.active_pipes =
>> -               REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
>> -       pmdemand_state->params.active_dbufs =
>> -               REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
>>         pmdemand_state->params.active_phys =
>>                 REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);
>>  
>> -       /* Set 2*/
>>         pmdemand_state->params.cdclk_freq_mhz =
>>                 REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
>>         pmdemand_state->params.ddiclk_max =
>>                 REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
>> -       pmdemand_state->params.scalers =
>> -               REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
>> +
>> +       if (DISPLAY_VER(i915) >= 30) {
>> +               pmdemand_state->params.active_pipes =
>> +                       REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
>> +       } else {
>> +               pmdemand_state->params.active_pipes =
>> +                       REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
>> +               pmdemand_state->params.active_dbufs =
>> +                       REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
>> +
>> +               pmdemand_state->params.scalers =
>> +                       REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
>> +       }
>>  
>>  unlock:
>>         mutex_unlock(&i915->display.pmdemand.lock);
>> @@ -442,6 +454,10 @@ void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
>>  {
>>         u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);
>>  
>> +       /* PM Demand only tracks active dbufs on pre-Xe3 platforms */
>> +       if (DISPLAY_VER(i915) >= 30)
>> +               return;
>> +
>>         mutex_lock(&i915->display.pmdemand.lock);
>>         if (drm_WARN_ON(&i915->drm,
>>                         !intel_pmdemand_check_prev_transaction(i915)))
>> @@ -460,7 +476,8 @@ void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
>>  }
>>  
>>  static void
>> -intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
>> +intel_pmdemand_update_params(struct drm_i915_private *i915,
>> +                            const struct intel_pmdemand_state *new,
>>                              const struct intel_pmdemand_state *old,
>>                              u32 *reg1, u32 *reg2, bool serialized)
>>  {
>> @@ -495,16 +512,22 @@ intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
>>         update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
>>         update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
>>         update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
>> -       update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
>> -       update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
>>         update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);
>>  
>>         /* Set 2*/
>>         update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
>>         update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
>> -       update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
>>         update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);
>>  
>> +       if (DISPLAY_VER(i915) >= 30) {
>> +               update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
>> +       } else {
>> +               update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
>> +               update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
>> +
>> +               update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
>> +       }
>> +
>>  #undef update_reg
>>  }
>>  
>> @@ -529,7 +552,7 @@ intel_pmdemand_program_params(struct drm_i915_private *i915,
>>         reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
>>         mod_reg2 = reg2;
>>  
>> -       intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
>> +       intel_pmdemand_update_params(i915, new, old, &mod_reg1, &mod_reg2,
>>                                      serialized);
>>  
>>         if (reg1 != mod_reg1) {
>> diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
>> index 128fd61f8f14..a1c49efdc493 100644
>> --- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
>> +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
>> @@ -20,14 +20,14 @@ struct pmdemand_params {
>>         u8 voltage_index;
>>         u8 qclk_gv_index;
>>         u8 active_pipes;
>> -       u8 active_dbufs;
>> +       u8 active_dbufs;        /* pre-Xe3 only */
>>         /* Total number of non type C active phys from active_phys_mask */
>>         u8 active_phys;
>>         u8 plls;
>>         u16 cdclk_freq_mhz;
>>         /* max from ddi_clocks[] */
>>         u16 ddiclk_max;
>> -       u8 scalers;
>> +       u8 scalers;             /* pre-Xe3 only */
>>  };
>>  
>>  struct intel_pmdemand_state {
>> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
>> index 818142f5a10c..d30459f8d1cb 100644
>> --- a/drivers/gpu/drm/i915/i915_reg.h
>> +++ b/drivers/gpu/drm/i915/i915_reg.h
>> @@ -2705,6 +2705,7 @@
>>  #define  XELPDP_PMDEMAND_QCLK_GV_BW_MASK               REG_GENMASK(31, 16)
>>  #define  XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK            REG_GENMASK(14, 12)
>>  #define  XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK            REG_GENMASK(11, 8)
>> +#define  XE3_PMDEMAND_PIPES_MASK                       REG_GENMASK(7, 4)
>>  #define  XELPDP_PMDEMAND_PIPES_MASK                    REG_GENMASK(7, 6)
>>  #define  XELPDP_PMDEMAND_DBUFS_MASK                    REG_GENMASK(5, 4)
>>  #define  XELPDP_PMDEMAND_PHYS_MASK                     REG_GENMASK(2, 0)
>

