[PATCH] drm/amd/pm: conditionally disable pcie lane switching for some sienna_cichlid SKUs
Quan, Evan
Evan.Quan at amd.com
Mon Apr 24 07:33:27 UTC 2023
[Public]
> -----Original Message-----
> From: Limonciello, Mario <Mario.Limonciello at amd.com>
> Sent: Friday, April 21, 2023 9:40 PM
> To: Quan, Evan <Evan.Quan at amd.com>; amd-gfx at lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
> Subject: RE: [PATCH] drm/amd/pm: conditionally disable pcie lane switching
> for some sienna_cichlid SKUs
>
> [Public]
>
>
>
> > -----Original Message-----
> > From: Quan, Evan <Evan.Quan at amd.com>
> > Sent: Friday, April 21, 2023 02:29
> > To: amd-gfx at lists.freedesktop.org
> > Cc: Deucher, Alexander <Alexander.Deucher at amd.com>; Limonciello, Mario
> > <Mario.Limonciello at amd.com>; Quan, Evan <Evan.Quan at amd.com>
> > Subject: [PATCH] drm/amd/pm: conditionally disable pcie lane switching
> > for some sienna_cichlid SKUs
> >
> > Disable PCIe lane switching for some sienna_cichlid SKUs, since it
> > may not work reliably on some platforms.
> >
> > Signed-off-by: Evan Quan <evan.quan at amd.com>
> > Change-Id: Iea9ceaa146c8706768ee077c10e5d33bce9bc1c2
>
> You can drop the Gerrit Change-Id here
Sure, thanks.
Evan
>
> > ---
> > .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 92 +++++++++++++++----
> > 1 file changed, 74 insertions(+), 18 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > index 4b91cdc3eaa0..e7223513e384 100644
> > --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> > @@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
> > return ret;
> > }
> >
> > +static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
> > + uint32_t *gen_speed_override,
> > + uint32_t *lane_width_override)
> > +{
> > + struct amdgpu_device *adev = smu->adev;
> > +
> > + *gen_speed_override = 0xff;
> > + *lane_width_override = 0xff;
> > +
> > + switch (adev->pdev->device) {
> > + case 0x73A0:
> > + case 0x73A1:
> > + case 0x73A2:
> > + case 0x73A3:
> > + case 0x73AB:
> > + case 0x73AE:
> > + /* Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32 */
> > + *lane_width_override = 6;
> > + break;
> > + case 0x73E0:
> > + case 0x73E1:
> > + case 0x73E3:
> > + *lane_width_override = 4;
> > + break;
> > + case 0x7420:
> > + case 0x7421:
> > + case 0x7422:
> > + case 0x7423:
> > + case 0x7424:
> > + *lane_width_override = 3;
> > + break;
> > + default:
> > + break;
> > + }
> > +}
> > +
> > +#define MAX(a, b) ((a) > (b) ? (a) : (b))
> > +
> > static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
> > uint32_t pcie_gen_cap,
> > uint32_t pcie_width_cap)
> > {
> > struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
> > -
> > - uint32_t smu_pcie_arg;
> > + struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
> > + uint32_t gen_speed_override, lane_width_override;
> > uint8_t *table_member1, *table_member2;
> > + uint32_t min_gen_speed, max_gen_speed;
> > + uint32_t min_lane_width, max_lane_width;
> > + uint32_t smu_pcie_arg;
> > int ret, i;
> >
> > GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
> > GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
> >
> > - /* lclk dpm table setup */
> > - for (i = 0; i < MAX_PCIE_CONF; i++) {
> > - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
> > - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
> > + sienna_cichlid_get_override_pcie_settings(smu,
> > + &gen_speed_override,
> > + &lane_width_override);
> > +
> > + /* PCIE gen speed override */
> > + if (gen_speed_override != 0xff) {
> > + min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
> > + max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
> > + } else {
> > + min_gen_speed = MAX(0, table_member1[0]);
> > + max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
> > + min_gen_speed = min_gen_speed > max_gen_speed ?
> > + max_gen_speed : min_gen_speed;
> > }
> > + pcie_table->pcie_gen[0] = min_gen_speed;
> > + pcie_table->pcie_gen[1] = max_gen_speed;
> > +
> > + /* PCIE lane width override */
> > + if (lane_width_override != 0xff) {
> > + min_lane_width = MIN(pcie_width_cap, lane_width_override);
> > + max_lane_width = MIN(pcie_width_cap, lane_width_override);
> > + } else {
> > + min_lane_width = MAX(1, table_member2[0]);
> > + max_lane_width = MIN(pcie_width_cap, table_member2[1]);
> > + min_lane_width = min_lane_width > max_lane_width ?
> > + max_lane_width : min_lane_width;
> > + }
> > + pcie_table->pcie_lane[0] = min_lane_width;
> > + pcie_table->pcie_lane[1] = max_lane_width;
> >
> > for (i = 0; i < NUM_LINK_LEVELS; i++) {
> > - smu_pcie_arg = (i << 16) |
> > - ((table_member1[i] <= pcie_gen_cap) ?
> > - (table_member1[i] << 8) :
> > - (pcie_gen_cap << 8)) |
> > - ((table_member2[i] <= pcie_width_cap) ?
> > - table_member2[i] :
> > - pcie_width_cap);
> > + smu_pcie_arg = (i << 16 |
> > + pcie_table->pcie_gen[i] << 8 |
> > + pcie_table->pcie_lane[i]);
> >
> > ret = smu_cmn_send_smc_msg_with_param(smu,
> > SMU_MSG_OverridePcieParameters,
> > @@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
> > NULL);
> > if (ret)
> > return ret;
> > -
> > - if (table_member1[i] > pcie_gen_cap)
> > - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
> > - if (table_member2[i] > pcie_width_cap)
> > - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
> > }
> >
> > return 0;
> > --
> > 2.34.1
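
For readers following the logic of the patch above: the override works by collapsing the minimum and maximum link settings to the same capped value, so the SMU has no range left to switch across; without an override, the range still comes from the pptable, clamped by the platform caps. Below is a minimal, self-contained userspace sketch of that clamping behavior. It is not part of the patch; the function names and sample values are illustrative only.

/*
 * Standalone sketch of the min/max clamping used by
 * sienna_cichlid_update_pcie_parameters(). Build with any C compiler.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/*
 * Compute the [min, max] PCIe lane width (encoded 1..7 == x1..x32) that
 * would be programmed for one SKU.
 */
static void clamp_lane_width(uint32_t width_cap, uint32_t override,
                             const uint8_t pptable_lane[2],
                             uint32_t *min_w, uint32_t *max_w)
{
	if (override != 0xff) {
		/* SKU override present: min == max, so no lane switching. */
		*min_w = MIN(width_cap, override);
		*max_w = MIN(width_cap, override);
	} else {
		/* No override: use the pptable range, clamped by the cap. */
		*min_w = MAX(1, pptable_lane[0]);
		*max_w = MIN(width_cap, pptable_lane[1]);
		if (*min_w > *max_w)
			*min_w = *max_w;
	}
}

int main(void)
{
	const uint8_t pptable_lane[2] = { 5, 6 };	/* e.g. x12 .. x16 */
	uint32_t min_w, max_w;

	/* Overridden SKU (e.g. override = 6): width is pinned. */
	clamp_lane_width(6, 6, pptable_lane, &min_w, &max_w);
	printf("override:    min=%" PRIu32 " max=%" PRIu32 "\n", min_w, max_w);

	/* Unaffected SKU (sentinel 0xff): range comes from the pptable. */
	clamp_lane_width(6, 0xff, pptable_lane, &min_w, &max_w);
	printf("no override: min=%" PRIu32 " max=%" PRIu32 "\n", min_w, max_w);

	return 0;
}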