[Freedreno] [PATCH 5/5] drm/msm/dpu1: Handle the reg bus ICC path

Jeykumar Sankaran quic_jeykumar at quicinc.com
Wed Apr 19 19:06:50 UTC 2023



On 4/17/2023 8:30 AM, Konrad Dybcio wrote:
> Apart from the already handled data bus (MAS_MDP_Pn<->DDR), there's
> another path that needs to be handled to ensure MDSS functions properly,
> namely the "reg bus", a.k.a the CPU-MDSS interconnect.
> 
> Gating that path may have a variety of effects, ranging from none at all
> to otherwise inexplicable DSI timeouts.
> 
> On the DPU side, we need to keep the bus alive. The vendor driver
> kickstarts it to the maximum (300 MB/s) throughput on first commit, but in
> exchange for some battery life in the rare DPU-enabled-but-panel-disabled
> use cases, we can request it at DPU init and gate it at suspend.
> 
> Signed-off-by: Konrad Dybcio <konrad.dybcio at linaro.org>
> ---
>   drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 22 ++++++++++++++++++++--
>   drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h |  1 +
>   2 files changed, 21 insertions(+), 2 deletions(-)
> 
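A quick note on units before the hunks below (my own reading, not something
the patch itself states): icc_set_bw() takes average and peak bandwidth in
kB/s, and MBps_to_icc() converts from megabytes per second, so the
MBps_to_icc(300) vote in the resume path is a 300 MB/s peak request with no
average-bandwidth component. Minimal sketch of that vote, with a
hypothetical helper name that is not part of this patch:

#include <linux/interconnect.h>

/* Sketch only -- dpu_reg_bus_vote_max() is not in this patch.
 * MBps_to_icc(300) expands to 300000 kB/s, i.e. a 300 MB/s peak vote
 * with no average-bandwidth request.
 */
static void dpu_reg_bus_vote_max(struct icc_path *reg_bus_path)
{
	if (reg_bus_path)
		icc_set_bw(reg_bus_path, 0, MBps_to_icc(300));
}
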
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
> index dd6c1c40ab9e..d1f77faebbc0 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
> @@ -384,15 +384,17 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
>   	return 0;
>   }
>   
> -static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
> +static int dpu_kms_parse_icc_paths(struct dpu_kms *dpu_kms)
>   {
>   	struct icc_path *path0;
>   	struct icc_path *path1;
> +	struct icc_path *reg_bus_path;
>   	struct drm_device *dev = dpu_kms->dev;
>   	struct device *dpu_dev = dev->dev;
>   
>   	path0 = msm_icc_get(dpu_dev, "mdp0-mem");
>   	path1 = msm_icc_get(dpu_dev, "mdp1-mem");
> +	reg_bus_path = msm_icc_get(dpu_dev, "cpu-cfg");
>   
>   	if (IS_ERR_OR_NULL(path0))
>   		return PTR_ERR_OR_ZERO(path0);
> @@ -404,6 +406,10 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
>   		dpu_kms->mdp_path[1] = path1;
>   		dpu_kms->num_mdp_paths++;
>   	}
> +
> +	if (!IS_ERR_OR_NULL(reg_bus_path))
> +		dpu_kms->reg_bus_path = reg_bus_path;
> +
>   	return 0;
>   }
>   
> @@ -1039,7 +1045,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
>   		DPU_DEBUG("REG_DMA is not defined");
>   	}
>   
> -	dpu_kms_parse_data_bus_icc_path(dpu_kms);
> +	dpu_kms_parse_icc_paths(dpu_kms);
>   
>   	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
>   	if (rc < 0)
> @@ -1241,6 +1247,9 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
>   	for (i = 0; i < dpu_kms->num_mdp_paths; i++)
>   		icc_set_bw(dpu_kms->mdp_path[i], 0, 0);
>   
> +	if (dpu_kms->reg_bus_path)
> +		icc_set_bw(dpu_kms->reg_bus_path, 0, 0);
> +
>   	return 0;
>   }
>   
> @@ -1261,6 +1270,15 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
>   		return rc;
>   	}
>   
> +	/*
> +	 * The vendor driver supports setting 76.8 / 150 / 300 Mbps on this
How do you arrive at these distinct BW values? Are they provided by the
ICC framework for the given path?
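
If they come from the downstream reg bus votes rather than from the ICC
framework itself, would it make sense to keep the levels in one table
instead of hard-coding the maximum? Rough sketch of what I mean below --
the names are hypothetical, and the peak values are in kB/s, which is what
icc_set_bw() expects (assuming <linux/interconnect.h> and the dpu_kms
struct from this patch):

/* Hypothetical, not part of this patch: the three levels quoted above,
 * expressed as peak-bandwidth votes in kB/s (76.8 / 150 / 300 MB/s).
 */
enum dpu_reg_bus_level {
	DPU_REG_BUS_OFF,
	DPU_REG_BUS_LOW,
	DPU_REG_BUS_MID,
	DPU_REG_BUS_HIGH,
};

static const u32 dpu_reg_bus_peak_kbps[] = {
	[DPU_REG_BUS_OFF]  = 0,
	[DPU_REG_BUS_LOW]  = kBps_to_icc(76800),
	[DPU_REG_BUS_MID]  = kBps_to_icc(150000),
	[DPU_REG_BUS_HIGH] = kBps_to_icc(300000),
};

/* No-op when the "cpu-cfg" path was not found in DT. */
static void dpu_reg_bus_set_level(struct dpu_kms *dpu_kms,
				  enum dpu_reg_bus_level level)
{
	if (dpu_kms->reg_bus_path)
		icc_set_bw(dpu_kms->reg_bus_path, 0,
			   dpu_reg_bus_peak_kbps[level]);
}
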
> +	 * path, but it seems to go for the highest level when display output
> +	 * is enabled and zero otherwise. For simplicity, we can assume that
> +	 * DPU being enabled and running implies that.
> +	 */
> +	if (dpu_kms->reg_bus_path)
> +		icc_set_bw(dpu_kms->reg_bus_path, 0, MBps_to_icc(300));
> +
>   	dpu_vbif_init_memtypes(dpu_kms);
>   
>   	drm_for_each_encoder(encoder, ddev)
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
> index d5d9bec90705..c332381d58c4 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
> @@ -111,6 +111,7 @@ struct dpu_kms {
>   	atomic_t bandwidth_ref;
>   	struct icc_path *mdp_path[2];
>   	u32 num_mdp_paths;
> +	struct icc_path *reg_bus_path;
>   };
>   
>   struct vsync_info {
> 

