[PATCH 2/2] drm/amd/powerplay: correct the supported pcie GenSpeed and LaneCount
Evan Quan
evan.quan at amd.com
Fri Jul 3 08:55:15 UTC 2020
The LCLK dpm table setup should be performed in .update_pcie_parameters().
Otherwise, the updated GenSpeed and LaneCount information will be lost.
Change-Id: I028c26ca0e54098cb93d9e9266719f1762ba2d7e
Signed-off-by: Evan Quan <evan.quan at amd.com>
---
drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 17 +++++++----------
.../gpu/drm/amd/powerplay/sienna_cichlid_ppt.c | 17 +++++++----------
2 files changed, 14 insertions(+), 20 deletions(-)
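
Note (commentary after the ---, not part of the commit message): below is a minimal, self-contained sketch of the ordering hazard this patch addresses. It uses simplified stand-in types and made-up example values, not the driver's real PPTable_t or smu structs; the helper names populate_from_pptable()/apply_caps() and the numbers are illustration only (only MAX_PCIE_CONF, PcieGenSpeed and PcieLaneCount are taken from the patch). It only shows why copying the raw pptable values into the pcie dpm table after the gen/width caps have been applied would discard the capped values, which is why the setup now sits at the top of .update_pcie_parameters().

#include <stdint.h>
#include <stdio.h>

#define MAX_PCIE_CONF 2

struct pcie_table {
	uint8_t pcie_gen[MAX_PCIE_CONF];
	uint8_t pcie_lane[MAX_PCIE_CONF];
};

/* Stand-ins for the raw PPTable values (made-up example numbers). */
static const uint8_t PcieGenSpeed[MAX_PCIE_CONF]  = { 3, 4 };
static const uint8_t PcieLaneCount[MAX_PCIE_CONF] = { 6, 6 };

/* What the old .set_default_dpm_table() hunk did: copy the raw values. */
static void populate_from_pptable(struct pcie_table *t)
{
	for (int i = 0; i < MAX_PCIE_CONF; i++) {
		t->pcie_gen[i]  = PcieGenSpeed[i];
		t->pcie_lane[i] = PcieLaneCount[i];
	}
}

/* Simplified stand-in for the cap handling in .update_pcie_parameters(). */
static void apply_caps(struct pcie_table *t, uint8_t gen_cap, uint8_t width_cap)
{
	for (int i = 0; i < MAX_PCIE_CONF; i++) {
		if (t->pcie_gen[i] > gen_cap)
			t->pcie_gen[i] = gen_cap;
		if (t->pcie_lane[i] > width_cap)
			t->pcie_lane[i] = width_cap;
	}
}

int main(void)
{
	struct pcie_table t;

	populate_from_pptable(&t);
	apply_caps(&t, 2, 4);
	printf("capped:      gen=%d lane=%d\n", t.pcie_gen[1], t.pcie_lane[1]);

	/*
	 * Hazard: if the raw copy runs again after the caps were applied,
	 * the capped values are silently thrown away.  Doing the copy
	 * inside update_pcie_parameters(), right before the clamping,
	 * removes that window.
	 */
	populate_from_pptable(&t);
	printf("repopulated: gen=%d lane=%d\n", t.pcie_gen[1], t.pcie_lane[1]);

	return 0;
}
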
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 3db5e663aa6f..97d14539c95e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -693,7 +693,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
 	struct smu_11_0_dpm_table *dpm_table;
 	int ret = 0;
-	int i;
 
 	/* socclk dpm table setup */
 	dpm_table = &dpm_context->dpm_tables.soc_table;
@@ -857,12 +856,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 		dpm_table->max = dpm_table->dpm_levels[0].value;
 	}
 
-	/* lclk dpm table setup */
-	for (i = 0; i < MAX_PCIE_CONF; i++) {
-		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
-		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
-	}
-
 	return 0;
 }
 
@@ -1936,12 +1929,16 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
 					 uint32_t pcie_gen_cap,
 					 uint32_t pcie_width_cap)
 {
+	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
-	int ret, i;
 	uint32_t smu_pcie_arg;
+	int ret, i;
 
-	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+	/* lclk dpm table setup */
+	for (i = 0; i < MAX_PCIE_CONF; i++) {
+		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i];
+		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i];
+	}
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
 		smu_pcie_arg = (i << 16) |
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index 7a108676f90a..46be02e4b93c 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -601,7 +601,6 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
 	struct smu_11_0_dpm_table *dpm_table;
 	int ret = 0;
-	int i;
 
 	/* socclk dpm table setup */
 	dpm_table = &dpm_context->dpm_tables.soc_table;
@@ -819,12 +818,6 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 		dpm_table->max = dpm_table->dpm_levels[0].value;
 	}
 
-	/* lclk dpm table setup */
-	for (i = 0; i < MAX_PCIE_CONF; i++) {
-		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
-		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
-	}
-
 	return 0;
 }
 
@@ -1722,12 +1715,16 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 					 uint32_t pcie_gen_cap,
 					 uint32_t pcie_width_cap)
 {
+	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
-	int ret, i;
 	uint32_t smu_pcie_arg;
+	int ret, i;
 
-	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
-	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+	/* lclk dpm table setup */
+	for (i = 0; i < MAX_PCIE_CONF; i++) {
+		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i];
+		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i];
+	}
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
 		smu_pcie_arg = (i << 16) |
--
2.27.0