[PATCH RESEND] drm/amd/display: move remaining FPU code to dml folder
Rodrigo Siqueira
Rodrigo.Siqueira at amd.com
Tue Oct 25 15:42:13 UTC 2022
Hi Ao,
First of all, thanks a lot for this patch.
On 10/20/22 20:31, Ao Zhong wrote:
> Move remaining FPU code to dml folder
> in preparation for enabling aarch64 support.
I guess you found some of these issues after you tried to enable the
arm64 compilation, right? If so, could you expand the commit message to
describe them better?
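For instance, something along these lines (just a rough sketch; please
adjust it to describe the failures you actually saw):

    These are the last remaining FPU operations outside the dml folder.
    The arm64 kernel is built with -mgeneral-regs-only, so floating-point
    code in dcn10_resource.c and dcn32_resource.c breaks the aarch64
    build; only files under dml/ are compiled with flags that allow
    FPU code.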
>
> Signed-off-by: Ao Zhong <hacc1225 at gmail.com>
> ---
> .../drm/amd/display/dc/dcn10/dcn10_resource.c | 44 +------------------
> .../drm/amd/display/dc/dcn32/dcn32_resource.c | 5 ++-
> .../drm/amd/display/dc/dml/dcn10/dcn10_fpu.c | 40 +++++++++++++++++
> .../drm/amd/display/dc/dml/dcn10/dcn10_fpu.h | 3 ++
> .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 8 ++++
> .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 3 ++
Could you split this commit into two parts?
One for DCN10 and another one for DCN32.
> 6 files changed, 59 insertions(+), 44 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
> index 56d30baf12df..6bfac8088ab0 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
> @@ -1295,47 +1295,6 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
> return value;
> }
>
> -/*
> - * Some architectures don't support soft-float (e.g. aarch64), on those
> - * this function has to be called with hardfloat enabled, make sure not
> - * to inline it so whatever fp stuff is done stays inside
> - */
> -static noinline void dcn10_resource_construct_fp(
> - struct dc *dc)
> -{
> - if (dc->ctx->dce_version == DCN_VERSION_1_01) {
> - struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
> - struct dcn_ip_params *dcn_ip = dc->dcn_ip;
> - struct display_mode_lib *dml = &dc->dml;
> -
> - dml->ip.max_num_dpp = 3;
> - /* TODO how to handle 23.84? */
> - dcn_soc->dram_clock_change_latency = 23;
> - dcn_ip->max_num_dpp = 3;
> - }
> - if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
> - dc->dcn_soc->urgent_latency = 3;
> - dc->debug.disable_dmcu = true;
> - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
> - }
> -
> -
> - dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
> - ASSERT(dc->dcn_soc->number_of_channels < 3);
> - if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
> - dc->dcn_soc->number_of_channels = 2;
> -
> - if (dc->dcn_soc->number_of_channels == 1) {
> - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
> - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
> - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
> - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
> - if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
> - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
> - }
> - }
> -}
> -
> static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
> {
> int i;
> @@ -1510,8 +1469,9 @@ static bool dcn10_resource_construct(
> memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
> memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
>
> - /* Other architectures we build for build this with soft-float */
> + DC_FP_START();
> dcn10_resource_construct_fp(dc);
> + DC_FP_END();
>
> if (!dc->config.is_vmin_only_asic)
> if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev))
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
> index a88dd7b3d1c1..287b7fa9bf41 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
> @@ -1918,8 +1918,9 @@ int dcn32_populate_dml_pipes_from_context(
> timing = &pipe->stream->timing;
>
> pipes[pipe_cnt].pipe.src.gpuvm = true;
> - pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
> - pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
> + DC_FP_START();
> + dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
> + DC_FP_END();
> pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
> pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
> pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
> diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c
> index 99644d896222..0495cecaf1df 100644
> --- a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c
> +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c
> @@ -27,6 +27,8 @@
> #include "dcn10/dcn10_resource.h"
>
> #include "dcn10_fpu.h"
> +#include "resource.h"
> +#include "amdgpu_dm/dc_fpu.h"
>
> /**
> * DOC: DCN10 FPU manipulation Overview
> @@ -121,3 +123,41 @@ struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
> .writeback_dram_clock_change_latency_us = 23.0,
> .return_bus_width_bytes = 64,
> };
> +
> +void dcn10_resource_construct_fp(
> + struct dc *dc)
Since this is a small function signature, could you put the dc parameter
on the same line as the function name? Same idea for the header file;
see the sketch below.
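I.e., something like:

    void dcn10_resource_construct_fp(struct dc *dc)

and, in dcn10_fpu.h:

    void dcn10_resource_construct_fp(struct dc *dc);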
> +{
> + dc_assert_fp_enabled();
> +
Drop the extra space in the above line.
Thanks
Siqueira
> + if (dc->ctx->dce_version == DCN_VERSION_1_01) {
> + struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
> + struct dcn_ip_params *dcn_ip = dc->dcn_ip;
> + struct display_mode_lib *dml = &dc->dml;
> +
> + dml->ip.max_num_dpp = 3;
> + /* TODO how to handle 23.84? */
> + dcn_soc->dram_clock_change_latency = 23;
> + dcn_ip->max_num_dpp = 3;
> + }
> + if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
> + dc->dcn_soc->urgent_latency = 3;
> + dc->debug.disable_dmcu = true;
> + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
> + }
> +
> +
> + dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
> + ASSERT(dc->dcn_soc->number_of_channels < 3);
> + if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
> + dc->dcn_soc->number_of_channels = 2;
> +
> + if (dc->dcn_soc->number_of_channels == 1) {
> + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
> + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
> + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
> + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
> + if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
> + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
> + }
> + }
> +}
> diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h
> index e74ed4b4ce5b..dcbfb73b0afd 100644
> --- a/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h
> +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h
> @@ -27,4 +27,7 @@
> #ifndef __DCN10_FPU_H__
> #define __DCN10_FPU_H__
>
> +void dcn10_resource_construct_fp(
> + struct dc *dc);
> +
> #endif /* __DCN20_FPU_H__ */
> diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
> index 819de0f11012..58772fce6437 100644
> --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
> +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
> @@ -2521,3 +2521,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
> }
> }
>
> +void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
> + int pipe_cnt)
> +{
> + dc_assert_fp_enabled();
> +
> + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
> + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
> +}
> diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
> index 3a3dc2ce4c73..ab010e7e840b 100644
> --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
> +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
> @@ -73,4 +73,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
>
> void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
>
> +void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
> + int pipe_cnt);
> +
> #endif