[PATCH] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU V2
Wang, Kevin(Yang)
Kevin1.Wang at amd.com
Sun Jan 5 06:11:14 UTC 2020
________________________________
From: amd-gfx <amd-gfx-bounces at lists.freedesktop.org> on behalf of Evan Quan <evan.quan at amd.com>
Sent: Thursday, January 2, 2020 10:39 AM
To: amd-gfx at lists.freedesktop.org <amd-gfx at lists.freedesktop.org>
Cc: Deucher, Alexander <Alexander.Deucher at amd.com>; Quan, Evan <Evan.Quan at amd.com>
Subject: [PATCH] drm/amd/powerplay: unified VRAM address for driver table interaction with SMU V2
With this, we can avoid passing in the VRAM address on every table
transfer. That puts extra, unnecessary traffic on the SMU in some
cases (e.g. polling the amdgpu_pm_info sysfs interface).
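In rough terms the message traffic changes as below (a sketch using the
message names from this patch, error handling elided, not the literal
code). Before, every smu_update_table() call sent three messages:

	smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
				    upper_32_bits(table->mc_address));
	smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
				    lower_32_bits(table->mc_address));
	smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableDram2Smu,
				    table_id);

After, the address pair is sent once at hw init via
smu_set_driver_table_location(), so each transfer needs only:

	smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableDram2Smu,
				    table_id);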
V2: document what the driver table is for and how it works
Change-Id: Ifb74d9cd89790b301e88d472b29cdb9b0365b65a
Signed-off-by: Evan Quan <evan.quan at amd.com>
---
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 98 ++++++++++++-------
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 3 +-
.../gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 10 ++
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 2 +
drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 2 +
drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 1 +
drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 1 +
drivers/gpu/drm/amd/powerplay/smu_internal.h | 2 +
drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 ++++
drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 26 +++--
drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 1 +
11 files changed, 117 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 95238ad38de8..beea4d9e82d4 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -519,26 +519,19 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
{
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
- struct smu_table *table = NULL;
- int ret = 0;
+ struct smu_table *table = &smu_table->driver_table;
int table_id = smu_table_get_index(smu, table_index);
+ uint32_t table_size;
+ int ret = 0;
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
- table = &smu_table->tables[table_index];
+ table_size = smu_table->tables[table_index].size;
if (drv2smu)
- memcpy(table->cpu_addr, table_data, table->size);
+ memcpy(table->cpu_addr, table_data, table_size);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(table->mc_address));
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(table->mc_address));
- if (ret)
- return ret;
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
@@ -550,7 +543,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
adev->nbio.funcs->hdp_flush(adev, NULL);
if (!drv2smu)
- memcpy(table_data, table->cpu_addr, table->size);
+ memcpy(table_data, table->cpu_addr, table_size);
return ret;
}
@@ -976,32 +969,56 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct smu_table *driver_table = &(smu_table->driver_table);
+ uint32_t max_table_size = 0;
int ret, i;
- for (i = 0; i < SMU_TABLE_COUNT; i++) {
- if (tables[i].size == 0)
- continue;
+ /* VRAM allocation for tool table */
+ if (tables[SMU_TABLE_PMSTATUSLOG].size) {
ret = amdgpu_bo_create_kernel(adev,
- tables[i].size,
- tables[i].align,
- tables[i].domain,
- &tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- if (ret)
- goto failed;
+ tables[SMU_TABLE_PMSTATUSLOG].size,
+ tables[SMU_TABLE_PMSTATUSLOG].align,
+ tables[SMU_TABLE_PMSTATUSLOG].domain,
+ &tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for tool table failed!\n");
+ return ret;
+ }
}
- return 0;
-failed:
- while (--i >= 0) {
+ /* VRAM allocation for driver table */
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
+ if (i == SMU_TABLE_PMSTATUSLOG)
+ continue;
+
+ if (max_table_size < tables[i].size)
+ max_table_size = tables[i].size;
+ }
+
+ driver_table->size = max_table_size;
+ driver_table->align = PAGE_SIZE;
+ driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ driver_table->size,
+ driver_table->align,
+ driver_table->domain,
+ &driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
+ if (ret) {
+ pr_err("VRAM allocation for driver table failed!\n");
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
}
+
return ret;
}
@@ -1009,18 +1026,19 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t i = 0;
+ struct smu_table *driver_table = &(smu_table->driver_table);
if (!tables)
return 0;
- for (i = 0; i < SMU_TABLE_COUNT; i++) {
- if (tables[i].size == 0)
- continue;
- amdgpu_bo_free_kernel(&tables[i].bo,
- &tables[i].mc_address,
- &tables[i].cpu_addr);
- }
+ if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
+ amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
+ &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
+ &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
+
+ amdgpu_bo_free_kernel(&driver_table->bo,
+ &driver_table->mc_address,
+ &driver_table->cpu_addr);
return 0;
}
@@ -1091,6 +1109,10 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
/* smu_dump_pptable(smu); */
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
+
/*
* Copy pptable bo in the vram to smc with SMU MSGs such as
* SetDriverDramAddr and TransferTableDram2Smu.
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 50b317f4b1e6..064b5201a8a7 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -2022,7 +2022,7 @@ static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
SwI2cRequest_t req;
struct amdgpu_device *adev = to_amdgpu_device(control);
struct smu_table_context *smu_table = &adev->smu.smu_table;
- struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+ struct smu_table *table = &smu_table->driver_table;
memset(&req, 0, sizeof(req));
arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
@@ -2261,6 +2261,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 02d33b50e735..b0591a8dda41 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -260,6 +260,15 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
+ /*
+ * The driver table is just a staging buffer for
+ * uploading/downloading content to and from the SMU.
+ *
+ * The table_id passed with SMU_MSG_TransferTableSmu2Dram/
+ * SMU_MSG_TransferTableDram2Smu tells the SMU which
+ * content the driver is interested in.
+ */
+ struct smu_table driver_table;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
@@ -498,6 +507,7 @@ struct pptable_funcs {
int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
int (*write_pptable)(struct smu_context *smu);
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+ int (*set_driver_table_location)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index db3f78676aeb..662989296174 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -170,6 +170,8 @@ int smu_v11_0_write_pptable(struct smu_context *smu);
int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+int smu_v11_0_set_driver_table_location(struct smu_context *smu);
+
int smu_v11_0_set_tool_table_location(struct smu_context *smu);
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index 3f1cd06e273c..d79e54b5ebf6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -90,4 +90,6 @@ int smu_v12_0_mode2_reset(struct smu_context *smu);
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index bb0915a6388e..a16af3a3843c 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -2112,6 +2112,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 506cc6bf4bc0..861e6410363b 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -920,6 +920,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
+ .set_driver_table_location = smu_v12_0_set_driver_table_location,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 77864e4236c4..783319ec8bf9 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -61,6 +61,8 @@
((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
#define smu_set_min_dcef_deep_sleep(smu) \
((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_driver_table_location(smu) \
+ ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0)
#define smu_set_tool_table_location(smu) \
((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
#define smu_notify_memory_pool_location(smu) \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 6fb93eb6ab39..e804f9854027 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -776,6 +776,24 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
+int smu_v11_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
+
+ return ret;
+}
+
int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 9e27462d0f4e..870e6db2907e 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -318,14 +318,6 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_DPMCLOCKS];
- if (!table)
- return -EINVAL;
-
- if (!table->cpu_addr)
- return -EINVAL;
return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
@@ -514,3 +506,21 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
return ret;
}
+
+int smu_v12_0_set_driver_table_location(struct smu_context *smu)
+{
+ struct smu_table *driver_table = &smu->smu_table.driver_table;
+ int ret = 0;
+
+ if (driver_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(driver_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(driver_table->mc_address));
+ }
[kevin]:
For these pairs of messages, the SMU driver had better take a lock to
protect against the interleavings below when multiple threads are
involved (e.g. concurrent cat on a sysfs node); a minimal sketch
follows this hunk:
High A + Low B
High B + Low A
+
+ return ret;
+}
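[kevin]:
Something like the sketch below is what I mean -- the message_lock
member is illustrative (assuming such a lock exists on smu_context),
not the real driver code:

static int smu_v12_0_set_driver_table_location_locked(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (!driver_table->mc_address)
		return 0;

	/*
	 * Serialize the High/Low pair so two threads cannot interleave
	 * and leave the SMU with a mixed 64-bit address.
	 */
	mutex_lock(&smu->message_lock);		/* hypothetical lock */
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetDriverDramAddrHigh,
					  upper_32_bits(driver_table->mc_address));
	if (!ret)
		ret = smu_send_smc_msg_with_param(smu,
						  SMU_MSG_SetDriverDramAddrLow,
						  lower_32_bits(driver_table->mc_address));
	mutex_unlock(&smu->message_lock);

	return ret;
}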
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 27bdcdeb08d9..38febd5ca4da 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3236,6 +3236,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.check_fw_version = smu_v11_0_check_fw_version,
.write_pptable = smu_v11_0_write_pptable,
.set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
--
2.24.1