[PATCH 11/14] drm/amd/display: refactor riommu invalidation wa

Solomon Chiu <solomon.chiu@amd.com>
Sat Jul 24 04:09:58 UTC 2021


From: Eric Yang <Eric.Yang2@amd.com>

[Why]
The previous workaround re-issued the riommu invalidation request on
every plane disable. A cleaner solution only needs to be done once,
on boot.

[How]
Remove the previous workaround and instead program an extra VMID one
time on boot, as part of DCHUB system context initialization.

Reviewed-by: Kazlauskas Nicholas <Nicholas.Kazlauskas@amd.com>
Acked-by: Solomon Chiu <solomon.chiu@amd.com>
Signed-off-by: Eric Yang <Eric.Yang2@amd.com>
---
 .../drm/amd/display/dc/dcn20/dcn20_hwseq.c    |  6 ---
 .../drm/amd/display/dc/dcn31/dcn31_hubbub.c   | 48 +++++++++++--------
 .../drm/amd/display/dc/dcn31/dcn31_hwseq.c    | 17 -------
 .../gpu/drm/amd/display/dc/dcn31/dcn31_init.c |  2 +-
 .../drm/amd/display/dc/dcn31/dcn31_resource.c |  1 -
 .../gpu/drm/amd/display/dc/inc/hw/dchubbub.h  |  3 --
 .../amd/display/dc/inc/hw_sequencer_private.h |  1 -
 7 files changed, 28 insertions(+), 50 deletions(-)
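
For context, the one-time boot programming that replaces the workaround
boils down to the following, condensed from the dcn31_hubbub.c hunk below
(the system-aperture REG_SET sequence is elided to a comment):

static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
		struct dcn_hubbub_phys_addr_config *pa_config)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_vmid_page_table_config phys_config;

	/* program the FB/AGP system aperture registers (see hunk below) */

	if (pa_config->gart_config.page_table_start_addr !=
			pa_config->gart_config.page_table_end_addr) {
		phys_config.page_table_start_addr =
				pa_config->gart_config.page_table_start_addr >> 12;
		phys_config.page_table_end_addr =
				pa_config->gart_config.page_table_end_addr >> 12;
		phys_config.page_table_base_addr =
				pa_config->gart_config.page_table_base_addr;
		phys_config.depth = 0;
		phys_config.block_size = 0;
		/* VMID 0 carries the normal PA config */
		dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config);
		/* extra, otherwise unused VMID programmed once here,
		 * replacing the per-plane-disable invalidation request */
		dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
	}

	dcn21_dchvm_init(hubbub);

	return NUM_VMID;
}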

diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index ef185b93b31d..5c2853654cca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -570,12 +570,6 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
 
-	if (hws->wa.early_riommu_invalidation) {
-		struct hubbub *hubbub = dc->res_pool->hubbub;
-
-		hubbub->funcs->apply_invalidation_req_wa(hubbub, &hubbub->vmid_cache);
-	}
-
 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
 
 	/* In flip immediate with pipe splitting case GSL is used for
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index ef233cb49b31..90c73a1cb986 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -876,11 +876,35 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
 static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
 		struct dcn_hubbub_phys_addr_config *pa_config)
 {
-	hubbub3_init_dchub_sys_ctx(hubbub, pa_config);
+	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+	struct dcn_vmid_page_table_config phys_config;
 
-	dcn21_dchvm_init(hubbub);
+	REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
+			FB_BASE, pa_config->system_aperture.fb_base >> 24);
+	REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
+			FB_TOP, pa_config->system_aperture.fb_top >> 24);
+	REG_SET(DCN_VM_FB_OFFSET, 0,
+			FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
+	REG_SET(DCN_VM_AGP_BOT, 0,
+			AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
+	REG_SET(DCN_VM_AGP_TOP, 0,
+			AGP_TOP, pa_config->system_aperture.agp_top >> 24);
+	REG_SET(DCN_VM_AGP_BASE, 0,
+			AGP_BASE, pa_config->system_aperture.agp_base >> 24);
 
-	hubbub->vmid_cache = *pa_config;
+	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+		phys_config.depth = 0;
+		phys_config.block_size = 0;
+		// Init VMID 0 based on PA config
+		dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config);
+
+		dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
+	}
+
+	dcn21_dchvm_init(hubbub);
 
 	return NUM_VMID;
 }
@@ -922,23 +946,6 @@ static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub,
 	}
 }
 
-static void hubbub31_apply_invalidation_req_wa(struct hubbub *hubbub,
-		struct dcn_hubbub_phys_addr_config *pa_config)
-{
-	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
-	struct dcn_vmid_page_table_config phys_config;
-
-	if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
-		phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
-		phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
-		phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
-		phys_config.depth = 0;
-		phys_config.block_size = 0;
-		// Program an arbitrary unused VMID
-		dcn20_vmid_setup(&hubbub1->vmid[15], &phys_config);
-	}
-}
-
 static const struct hubbub_funcs hubbub31_funcs = {
 	.update_dchub = hubbub2_update_dchub,
 	.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
@@ -955,7 +962,6 @@ static const struct hubbub_funcs hubbub31_funcs = {
 	.program_compbuf_size = dcn31_program_compbuf_size,
 	.init_crb = dcn31_init_crb,
 	.hubbub_read_state = hubbub2_read_state,
-	.apply_invalidation_req_wa = hubbub31_apply_invalidation_req_wa
 };
 
 void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 83f7904630e6..6399e8acd093 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -598,20 +598,3 @@ bool dcn31_is_abm_supported(struct dc *dc,
 	}
 	return false;
 }
-
-static void apply_riommu_invalidation_wa(struct dc *dc)
-{
-	struct dce_hwseq *hws = dc->hwseq;
-
-	if (!hws->wa.early_riommu_invalidation)
-		return;
-
-	REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DISABLE_HOSTVM_FORCE_ALLOW_PSTATE, 0);
-}
-
-void dcn31_init_pipes(struct dc *dc, struct dc_state *context)
-{
-	dcn10_init_pipes(dc, context);
-	apply_riommu_invalidation_wa(dc);
-
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index aaf2dbd095fe..05954045c332 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -103,7 +103,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
 };
 
 static const struct hwseq_private_funcs dcn31_private_funcs = {
-	.init_pipes = dcn31_init_pipes,
+	.init_pipes = dcn10_init_pipes,
 	.update_plane_addr = dcn20_update_plane_addr,
 	.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
 	.update_mpcc = dcn20_update_mpcc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 38c010afade1..f9446ce4fdc5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1302,7 +1302,6 @@ static struct dce_hwseq *dcn31_hwseq_create(
 		hws->regs = &hwseq_reg;
 		hws->shifts = &hwseq_shift;
 		hws->masks = &hwseq_mask;
-		hws->wa.early_riommu_invalidation = true;
 	}
 	return hws;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 259283d8bde8..713f5558f5e1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -166,15 +166,12 @@ struct hubbub_funcs {
 	void (*program_det_size)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_in_kbyte);
 	void (*program_compbuf_size)(struct hubbub *hubbub, unsigned compbuf_size_kb, bool safe_to_increase);
 	void (*init_crb)(struct hubbub *hubbub);
-	void (*apply_invalidation_req_wa)(struct hubbub *hubbub,
-			struct dcn_hubbub_phys_addr_config *pa_config);
 };
 
 struct hubbub {
 	const struct hubbub_funcs *funcs;
 	struct dc_context *ctx;
 	bool riommu_active;
-	struct dcn_hubbub_phys_addr_config vmid_cache;
 };
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 082549f75978..f7f7e4fff0c2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -41,7 +41,6 @@ struct dce_hwseq_wa {
 	bool DEGVIDCN10_254;
 	bool DEGVIDCN21;
 	bool disallow_self_refresh_during_multi_plane_transition;
-	bool early_riommu_invalidation;
 };
 
 struct hwseq_wa_state {
-- 
2.25.1


