[PATCH 14/21] drm/msm/dpu: Reserve resources for CWB
Dmitry Baryshkov
dmitry.baryshkov at linaro.org
Fri Aug 30 17:25:29 UTC 2024
On Thu, Aug 29, 2024 at 01:48:35PM GMT, Jessica Zhang wrote:
> Reserve dedicated pingpong blocks for CWB
Please explain the design ideas in the commit message: e.g. why CWB requires
dedicated PINGPONG blocks and how they get paired with the LMs. Having just
a single phrase is usually not enough.
>
> Signed-off-by: Jessica Zhang <quic_jesszhan at quicinc.com>
> ---
> drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 17 ++++++--
> drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 1 +
> drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 65 +++++++++++++++++++++++++++++
> 3 files changed, 79 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> index 36b677cf9c7a..f1bd14d1f89e 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> @@ -2,7 +2,7 @@
> /*
> * Copyright (C) 2013 Red Hat
> * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
> - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
> + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
> *
> * Author: Rob Clark <robdclark at gmail.com>
> */
> @@ -1054,6 +1054,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
> struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
> struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
> int num_pp, num_dsc;
> + bool is_cwb_encoder;
> unsigned int dsc_mask = 0;
> int i;
>
> @@ -1067,6 +1068,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
>
> priv = drm_enc->dev->dev_private;
> dpu_kms = to_dpu_kms(priv->kms);
> + is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) &&
> + dpu_enc->disp_info.intf_type == INTF_WB;
>
> global_state = dpu_kms_get_existing_global_state(dpu_kms);
> if (IS_ERR_OR_NULL(global_state)) {
> @@ -1077,9 +1080,15 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
> trace_dpu_enc_mode_set(DRMID(drm_enc));
>
> /* Query resource that have been reserved in atomic check step. */
> - num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
> - drm_enc->crtc, DPU_HW_BLK_PINGPONG, hw_pp,
> - ARRAY_SIZE(hw_pp));
> + if (is_cwb_encoder)
> + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
> + drm_enc->crtc, DPU_HW_BLK_DCWB_PINGPONG, hw_pp,
> + ARRAY_SIZE(hw_pp));
> + else
> + num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
> + drm_enc->crtc, DPU_HW_BLK_PINGPONG, hw_pp,
> + ARRAY_SIZE(hw_pp));
Why is this necessary? Can we still use DPU_HW_BLK_PINGPONG?
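For example, something like this (a completely untested sketch; it keeps the
single DPU_HW_BLK_PINGPONG type, assumes dpu_rm_get_assigned_resources() is
left unfiltered and returns all PINGPONGs assigned to the CRTC, and the
num_pp_used below is just a hypothetical local):

	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					       drm_enc->crtc, DPU_HW_BLK_PINGPONG,
					       hw_pp, ARRAY_SIZE(hw_pp));

	/* keep only the PINGPONGs that match this encoder type */
	for (i = 0, num_pp_used = 0; i < num_pp; i++) {
		struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_pp[i]);

		if ((pp->idx >= PINGPONG_CWB_0) == is_cwb_encoder)
			hw_pp[num_pp_used++] = hw_pp[i];
	}
	num_pp = num_pp_used;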
> +
> dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
> drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
>
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
> index c43cb55fe1d2..c87790a1b940 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
> @@ -77,6 +77,7 @@ enum dpu_hw_blk_type {
> DPU_HW_BLK_LM,
> DPU_HW_BLK_CTL,
> DPU_HW_BLK_PINGPONG,
> + DPU_HW_BLK_DCWB_PINGPONG,
> DPU_HW_BLK_INTF,
> DPU_HW_BLK_WB,
> DPU_HW_BLK_DSPP,
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> index 13f84375e15d..afad26556cd5 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> @@ -236,6 +236,48 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
> return -EINVAL;
> }
>
> +static int _dpu_rm_reserve_cwb_pingpong(struct dpu_rm *rm,
> + struct dpu_global_state *global_state, uint32_t crtc_id,
> + struct msm_display_topology *topology)
> +{
> + int num_cwb_pp = topology->num_lm, cwb_pp_count = 0;
> + int cwb_pp_idx[MAX_BLOCKS];
> +
> + /*
> + * Reserve additional dedicated CWB pingpong blocks for each mixer
> + *
> + * TODO: add support for reserving non-dedicated CWB pingpong blocks
> + */
> + for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
> + cwb_pp_count < num_cwb_pp; i++) {
> + for (int j = PINGPONG_CWB_0 - PINGPONG_0;
> + j < ARRAY_SIZE(rm->pingpong_blks); j++) {
Alignment: the continuation line should line up with the opening parenthesis.
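Something like this (whitespace change only):

	for (int j = PINGPONG_CWB_0 - PINGPONG_0;
	     j < ARRAY_SIZE(rm->pingpong_blks); j++) {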
> + /*
> + * Odd LMs must be assigned to odd pingpongs and even
> + * LMs to even pingpongs
> + */
> + if (reserved_by_other(global_state->pingpong_to_crtc_id,
> + j, crtc_id) ||
> + i % 2 != j % 2)
> + continue;
> +
> + cwb_pp_idx[cwb_pp_count] = j;
> + cwb_pp_count++;
> + break;
> + }
> + }
> +
> + if (cwb_pp_count != num_cwb_pp) {
> + DPU_ERROR("Unable to reserve all cwb pingpongs\n");
> + return -ENAVAIL;
> + }
> +
> + for (int i = 0; i < cwb_pp_count; i++)
> + global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
> +
> + return 0;
> +}
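If I read the loop correctly, then for num_lm = 2 (and nothing reserved by
other CRTCs) i = 0 takes the first CWB pingpong and i = 1 takes the second
one, provided that PINGPONG_CWB_0 - PINGPONG_0 is even so that the parity
check lines up. Please spell that assumption out in the comment.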
> +
> /**
> * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
> * proposed use case requirements, incl. hardwired dependent blocks like
> @@ -617,6 +659,14 @@ static int _dpu_rm_make_reservation(
> return ret;
> }
>
> + if (topology->cwb_enabled) {
> + ret = _dpu_rm_reserve_cwb_pingpong(rm, global_state,
> + crtc_id, topology);
> + if (ret) {
> + DPU_ERROR("unable to find appropriate dcwb pingpongs\n");
> + return ret;
> + }
> + }
>
> ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id,
> topology);
> @@ -706,6 +756,7 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
>
> switch (type) {
> case DPU_HW_BLK_PINGPONG:
> + case DPU_HW_BLK_DCWB_PINGPONG:
> hw_blks = rm->pingpong_blks;
> hw_to_crtc_id = global_state->pingpong_to_crtc_id;
> max_blks = ARRAY_SIZE(rm->pingpong_blks);
> @@ -745,6 +796,20 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
> if (hw_to_crtc_id[i] != crtc_id)
> continue;
>
> + if (type == DPU_HW_BLK_PINGPONG) {
> + struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
> +
> + if (pp->idx >= PINGPONG_CWB_0)
> + continue;
> + }
> +
> + if (type == DPU_HW_BLK_DCWB_PINGPONG) {
> + struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);
> +
> + if (pp->idx < PINGPONG_CWB_0)
> + continue;
> + }
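Nit: these two checks can be folded into one, e.g. (untested):

	if (type == DPU_HW_BLK_PINGPONG ||
	    type == DPU_HW_BLK_DCWB_PINGPONG) {
		struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);

		if ((pp->idx >= PINGPONG_CWB_0) !=
		    (type == DPU_HW_BLK_DCWB_PINGPONG))
			continue;
	}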
> +
> if (num_blks == blks_size) {
> DPU_ERROR("More than %d resources assigned to crtc %d\n",
> blks_size, crtc_id);
>
> --
> 2.34.1
>
--
With best wishes
Dmitry