[Freedreno] [PATCH v4 03/19] drm/msm/dpu: simplify peer LM handling

Dmitry Baryshkov dmitry.baryshkov at linaro.org
Mon Jul 3 01:36:39 UTC 2023


On Mon, 3 Jul 2023 at 04:34, Abhinav Kumar <quic_abhinavk at quicinc.com> wrote:
>
>
>
> On 6/19/2023 2:25 PM, Dmitry Baryshkov wrote:
> > For each LM there is at max 1 peer LM which can be driven by the same
> > CTL, so there is no need to have a mask instead of just an ID of the peer
> > LM.
> >
>
> The change is ok, but the wording seems incorrect. Are you implying that
> only LM0 and LM1 can be used for CTL0 and so on? Because that's what this
> is implying.
>
> So any LM can be used with any CTL. It's just that each LM has only one
> peer. No need to mention anything about CTL.

Please correct me if I am wrong: with pre-active CTL, each CTL could
drive any single LM or a fixed LM pair. That's what was meant here.
Would it be better if I rephrased the commit message in this way?

>
> > Reviewed-by: Marijn Suijten <marijn.suijten at somainline.org>
> > Tested-by: Marijn Suijten <marijn.suijten at somainline.org>
> > Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov at linaro.org>
> > ---
> >   .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c    |  2 +-
> >   .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h    |  4 +--
> >   drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c        | 34 +++++++------------
> >   3 files changed, 15 insertions(+), 25 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
> > index 0de507d4d7b7..30fb5b1f3966 100644
> > --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
> > +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
> > @@ -394,7 +394,7 @@ static const struct dpu_sspp_sub_blks qcm2290_dma_sblk_0 = _DMA_SBLK("8", 1);
> >       .features = _fmask, \
> >       .sblk = _sblk, \
> >       .pingpong = _pp, \
> > -     .lm_pair_mask = (1 << _lmpair), \
> > +     .lm_pair = _lmpair, \
> >       .dspp = _dspp \
> >       }
> >
> > diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
> > index b860784ade72..b07caa4b867e 100644
> > --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
> > +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
> > @@ -554,14 +554,14 @@ struct dpu_sspp_cfg {
> >    * @features           bit mask identifying sub-blocks/features
> >    * @sblk:              LM Sub-blocks information
> >    * @pingpong:          ID of connected PingPong, PINGPONG_NONE if unsupported
> > - * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
> > + * @lm_pair:           ID of LM that can be controlled by same CTL
> >    */
> >   struct dpu_lm_cfg {
> >       DPU_HW_BLK_INFO;
> >       const struct dpu_lm_sub_blks *sblk;
> >       u32 pingpong;
> >       u32 dspp;
> > -     unsigned long lm_pair_mask;
> > +     unsigned long lm_pair;
> >   };
> >
> >   /**
> > diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> > index 471842bbb950..e333f4eeafc1 100644
> > --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> > +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
> > @@ -253,28 +253,19 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
> >   }
> >
> >   /**
> > - * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
> > + * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
> >    * @rm: dpu resource manager handle
> >    * @primary_idx: index of primary mixer in rm->mixer_blks[]
> > - * @peer_idx: index of other mixer in rm->mixer_blks[]
> > - * Return: true if rm->mixer_blks[peer_idx] is a peer of
> > - *          rm->mixer_blks[primary_idx]
> >    */
> > -static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
> > -             int peer_idx)
> > +static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
> >   {
> >       const struct dpu_lm_cfg *prim_lm_cfg;
> > -     const struct dpu_lm_cfg *peer_cfg;
> >
> >       prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
> > -     peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;
> >
> > -     if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
> > -             DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
> > -                             peer_cfg->id);
> > -             return false;
> > -     }
> > -     return true;
> > +     if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
> > +             return prim_lm_cfg->lm_pair - LM_0;
> > +     return -EINVAL;
> >   }
> >
> >   /**
> > @@ -351,7 +342,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
> >       int lm_idx[MAX_BLOCKS];
> >       int pp_idx[MAX_BLOCKS];
> >       int dspp_idx[MAX_BLOCKS] = {0};
> > -     int i, j, lm_count = 0;
> > +     int i, lm_count = 0;
> >
> >       if (!reqs->topology.num_lm) {
> >               DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
> > @@ -376,16 +367,15 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
> >               ++lm_count;
> >
> >               /* Valid primary mixer found, find matching peers */
> > -             for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
> > -                             lm_count < reqs->topology.num_lm; j++) {
> > -                     if (!rm->mixer_blks[j])
> > +             if (lm_count < reqs->topology.num_lm) {
> > +                     int j = _dpu_rm_get_lm_peer(rm, i);
> > +
> > +                     /* ignore the peer if there is an error or if the peer was already processed */
> > +                     if (j < 0 || j < i)
> >                               continue;
> >
> > -                     if (!_dpu_rm_check_lm_peer(rm, i, j)) {
> > -                             DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
> > -                                             LM_0 + i);
> > +                     if (!rm->mixer_blks[j])
> >                               continue;
> > -                     }
> >
> >                       if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
> >                                       global_state, enc_id, j,



-- 
With best wishes
Dmitry


More information about the Freedreno mailing list