[PATCH] drm/amd/powerplay: guard consistency between CPU copy and local VRAM
Feng, Kenneth
Kenneth.Feng at amd.com
Tue Jul 30 09:15:04 UTC 2019
Reviewed-by: Kenneth Feng <kenneth.feng at amd.com>
-----Original Message-----
From: amd-gfx [mailto:amd-gfx-bounces at lists.freedesktop.org] On Behalf Of Evan Quan
Sent: Tuesday, July 30, 2019 4:59 PM
To: amd-gfx at lists.freedesktop.org
Cc: Quan, Evan <Evan.Quan at amd.com>
Subject: [PATCH] drm/amd/powerplay: guard consistency between CPU copy and local VRAM
Flush the HDP read cache after the SMU writes a table into the driver-visible buffer and before the CPU reads it back. This prevents the CPU from using an out-dated copy of the data.
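The pattern applied in each copy-from-SMC path is the same. A simplified sketch (the flush call and memcpy come from the hunks below; the surrounding context is illustrative only):

	/* The SMU has just written the table into the driver-visible buffer
	 * in response to a TransferTableSmu2Dram message. Flush the HDP read
	 * cache so the CPU does not see stale data, then copy the table out. */
	adev->nbio_funcs->hdp_flush(adev, NULL);
	memcpy(table_data, table->cpu_addr, table->size);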
Change-Id: Ia18e89a923e3522e01717aa4d5ba35f8f4f20763
Signed-off-by: Evan Quan <evan.quan at amd.com>
---
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 ++++
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 4 ++++
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 4 ++++
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 4 ++++
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 8 ++++++++
5 files changed, 24 insertions(+)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 9b5661dc10da..d99a8aa0defb 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -314,6 +314,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
struct smu_table *table = NULL;
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
@@ -341,6 +342,9 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
if (ret)
return ret;
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
if (!drv2smu)
memcpy(table_data, table->cpu_addr, table->size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index ca660351a363..59b11ac5b53b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -116,6 +116,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct smu10_smumgr *priv =
(struct smu10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;); @@ -133,6 +134,9 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 7bfef8d85cda..8e07fc1fb9ce 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -37,6 +37,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv = hwmgr->smu_backend;
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL); @@ -54,6 +55,9 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 9ad07a91c38b..c11dae720a35 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -42,6 +42,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct vega12_smumgr *priv =
(struct vega12_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
"Invalid SMU Table ID!", return -EINVAL); @@ -64,6 +65,9 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 957446cf467e..3e97b83950dc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -163,6 +163,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
@@ -187,6 +188,9 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -266,6 +270,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
@@ -284,6 +289,9 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
+ /* flush hdp cache */
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
--
2.22.0