[PATCH] drm/amd/powerplay: issue proper hdp flush for table transferring
Quan, Evan
Evan.Quan at amd.com
Mon Jan 6 06:34:01 UTC 2020
Ping..
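
For context, the crux of the patch below is where the HDP flush lands
relative to each memcpy. Here is a minimal sketch of the ordering rule
it enforces, assuming the usual amdgpu helpers (amdgpu_asic_flush_hdp()
and smu_send_smc_msg_with_param()); the function name and locals are
illustrative only, not taken verbatim from the patch:

static int transfer_table_sketch(struct smu_context *smu,
				 enum smu_table_id table_index,
				 void *table_data, bool drv2smu)
{
	struct smu_table *table = &smu->smu_table.tables[table_index];
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (drv2smu) {
		/* CPU -> GPU: copy first, then flush HDP, so the SMU
		 * reads what the CPU just wrote rather than stale data. */
		memcpy(table->cpu_addr, table_data, table->size);
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_index);
	if (ret)
		return ret;

	if (!drv2smu) {
		/* GPU -> CPU: flush HDP first, then copy, so the CPU
		 * reads what the SMU just wrote back. */
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table->size);
	}

	return 0;
}

The old code issued a single flush after the SMU message, which orders
correctly only for the SMU-to-driver direction; for driver-to-SMU the
flush must land before the message that triggers the transfer.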
> -----Original Message-----
> From: Quan, Evan <Evan.Quan at amd.com>
> Sent: Friday, January 3, 2020 5:47 PM
> To: amd-gfx at lists.freedesktop.org
> Cc: Quan, Evan <Evan.Quan at amd.com>
> Subject: [PATCH] drm/amd/powerplay: issue proper hdp flush for table
> transferring
>
> Guard the content consistency between the views of the GPU and CPU
> during table transferring.
>
> Change-Id: Ib3cebb97a1c8fb302eb040483bbaf089ae00c6a9
> Signed-off-by: Evan Quan <evan.quan at amd.com>
> ---
> drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 15 ++++++++++-----
> .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 5 ++++-
> .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 5 ++++-
> .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 5 ++++-
> .../gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 10 ++++++++--
> 5 files changed, 30 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> index a56ebcc4e3c7..e1b64134bbd8 100644
> --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
> @@ -529,8 +529,14 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
>
> table_size = smu_table->tables[table_index].size;
>
> - if (drv2smu)
> + if (drv2smu) {
> memcpy(table->cpu_addr, table_data, table_size);
> + /*
> + * Flush hdp cache: to guarantee the content seen by
> + * the GPU is consistent with that seen by the CPU.
> + */
> + amdgpu_asic_flush_hdp(adev, NULL);
> + }
>
> ret = smu_send_smc_msg_with_param(smu, drv2smu ?
> SMU_MSG_TransferTableDram2Smu :
> @@ -539,11 +545,10 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
> if (ret)
> return ret;
>
> - /* flush hdp cache */
> - adev->nbio.funcs->hdp_flush(adev, NULL);
> -
> - if (!drv2smu)
> + if (!drv2smu) {
> + amdgpu_asic_flush_hdp(adev, NULL);
> memcpy(table_data, table->cpu_addr, table_size);
> + }
>
> return ret;
> }
> diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
> index aa0ee2b46135..2319400a3fcb 100644
> --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
> @@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
> priv->smu_tables.entry[table_id].table_id);
>
> /* flush hdp cache */
> - adev->nbio.funcs->hdp_flush(adev, NULL);
> + amdgpu_asic_flush_hdp(adev, NULL);
>
> memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
> priv->smu_tables.entry[table_id].size);
> @@ -150,6 +150,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> {
> struct smu10_smumgr *priv =
> (struct smu10_smumgr *)(hwmgr->smu_backend);
> + struct amdgpu_device *adev = hwmgr->adev;
>
> PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
> "Invalid SMU Table ID!", return -EINVAL;); @@ -161,6
> +162,8 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> memcpy(priv->smu_tables.entry[table_id].table, table,
> priv->smu_tables.entry[table_id].size);
>
> + amdgpu_asic_flush_hdp(adev, NULL);
> +
> smu10_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetDriverDramAddrHigh,
> upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
> diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
> index 39427ca32a15..715564009089 100644
> --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
> @@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
> priv->smu_tables.entry[table_id].table_id);
>
> /* flush hdp cache */
> - adev->nbio.funcs->hdp_flush(adev, NULL);
> + amdgpu_asic_flush_hdp(adev, NULL);
>
> memcpy(table, priv->smu_tables.entry[table_id].table,
> priv->smu_tables.entry[table_id].size);
> @@ -70,6 +70,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> uint8_t *table, int16_t table_id)
> {
> struct vega10_smumgr *priv = hwmgr->smu_backend;
> + struct amdgpu_device *adev = hwmgr->adev;
>
> /* under sriov, vbios or hypervisor driver
> * has already copy table to smc so here only skip it
> @@ -87,6 +88,8 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> memcpy(priv->smu_tables.entry[table_id].table, table,
> priv->smu_tables.entry[table_id].size);
>
> + amdgpu_asic_flush_hdp(adev, NULL);
> +
> smu9_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetDriverDramAddrHigh,
> upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
> diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
> index 90c782c132d2..a3915bfcce81 100644
> --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
> @@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
> return -EINVAL);
>
> /* flush hdp cache */
> - adev->nbio.funcs->hdp_flush(adev, NULL);
> + amdgpu_asic_flush_hdp(adev, NULL);
>
> memcpy(table, priv->smu_tables.entry[table_id].table,
> priv->smu_tables.entry[table_id].size);
> @@ -84,6 +84,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> {
> struct vega12_smumgr *priv =
> (struct vega12_smumgr *)(hwmgr->smu_backend);
> + struct amdgpu_device *adev = hwmgr->adev;
>
> PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
> "Invalid SMU Table ID!", return -EINVAL); @@ -95,6
> +96,8 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> memcpy(priv->smu_tables.entry[table_id].table, table,
> priv->smu_tables.entry[table_id].size);
>
> + amdgpu_asic_flush_hdp(adev, NULL);
> +
>
> PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetDriverDramAddrHigh,
> upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
> diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
> index f604612f411f..0db57fb83d30 100644
> --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
> @@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
> return ret);
>
> /* flush hdp cache */
> - adev->nbio.funcs->hdp_flush(adev, NULL);
> + amdgpu_asic_flush_hdp(adev, NULL);
>
> memcpy(table, priv->smu_tables.entry[table_id].table,
> priv->smu_tables.entry[table_id].size);
> @@ -207,6 +207,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> {
> struct vega20_smumgr *priv =
> (struct vega20_smumgr *)(hwmgr->smu_backend);
> + struct amdgpu_device *adev = hwmgr->adev;
> int ret = 0;
>
> PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
> @@ -219,6 +220,8 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
> memcpy(priv->smu_tables.entry[table_id].table, table,
> priv->smu_tables.entry[table_id].size);
>
> + amdgpu_asic_flush_hdp(adev, NULL);
> +
> PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetDriverDramAddrHigh,
> upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
> @@ -242,11 +245,14 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
> {
> struct vega20_smumgr *priv =
> (struct vega20_smumgr *)(hwmgr->smu_backend);
> + struct amdgpu_device *adev = hwmgr->adev;
> int ret = 0;
>
> memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
> priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
>
> + amdgpu_asic_flush_hdp(adev, NULL);
> +
> PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
> PPSMC_MSG_SetDriverDramAddrHigh,
> upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
> @@ -290,7 +296,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
> return ret);
>
> /* flush hdp cache */
> - adev->nbio.funcs->hdp_flush(adev, NULL);
> + amdgpu_asic_flush_hdp(adev, NULL);
>
> memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
> priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
> --
> 2.24.1