[DPU PATCH 06/11] drm/msm: Remove msm_commit/kthread, use atomic helper commit
Sean Paul
seanpaul at chromium.org
Mon Mar 12 20:23:10 UTC 2018
On Thu, Mar 08, 2018 at 05:08:03PM -0800, Jeykumar Sankaran wrote:
> On 2018-03-02 06:56, Sean Paul wrote:
> > On Thu, Mar 01, 2018 at 07:37:10PM -0500, Rob Clark wrote:
> > > On Thu, Mar 1, 2018 at 3:37 PM, <jsanka at codeaurora.org> wrote:
> > > > On 2018-03-01 07:27, Sean Paul wrote:
> > > >>
> > > >>> On Wed, Feb 28, 2018 at 08:07:00PM -0800, jsanka at codeaurora.org wrote:
> > > >>>
> > > >>> On 2018-02-28 11:19, Sean Paul wrote:
> > > >>> > Moving further towards switching fully to the atomic helpers, this
> > > >>> > patch removes the hand-rolled kthread nonblock commit code and uses the
> > > >>> > atomic helpers commit_work model.
> > > >>> >
> > > >>> > There's still a lot of copypasta here, but it's still needed to
> > > >>> > facilitate the swap_state and prepare_fence private functions. These
> > > >>> > will be sorted out in a follow-on patch.
> > > >>> >
> > > >>> > Change-Id: I9fcba27824ba63d3fab96cb2bc194bfa6f3475b7
> > > >>> > Signed-off-by: Sean Paul <seanpaul at chromium.org>
> > > >>> > ---
> > > >>> > drivers/gpu/drm/msm/msm_atomic.c | 199 ++++-------------------------
> > > >>> > drivers/gpu/drm/msm/msm_drv.c | 1 -
> > > >>> > drivers/gpu/drm/msm/msm_drv.h | 4 -
> > > >>> > 3 files changed, 35 insertions(+), 169 deletions(-)
> > > >>> >
> > > >>> > diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
> > > >>> > index 3a18bd3dc215..7e54eb65d096 100644
> > > >>> > --- a/drivers/gpu/drm/msm/msm_atomic.c
> > > >>> > +++ b/drivers/gpu/drm/msm/msm_atomic.c
> > > >>> > @@ -21,51 +21,6 @@
> > > >>> > #include "msm_gem.h"
> > > >>> > #include "msm_fence.h"
> > > >>> >
> > > >>> > -struct msm_commit {
> > > >>> > - struct drm_device *dev;
> > > >>> > - struct drm_atomic_state *state;
> > > >>> > - uint32_t crtc_mask;
> > > >>> > - bool nonblock;
> > > >>> > - struct kthread_work commit_work;
> > > >>> > -};
> > > >>> > -
> > > >>> > -/* block until specified crtcs are no longer pending update, and
> > > >>> > - * atomically mark them as pending update
> > > >>> > - */
> > > >>> > -static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
> > > >>> > -{
> > > >>> > - int ret;
> > > >>> > -
> > > >>> > - spin_lock(&priv->pending_crtcs_event.lock);
> > > >>> > - ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
> > > >>> > - !(priv->pending_crtcs & crtc_mask));
> > > >>> > - if (ret == 0) {
> > > >>> > - DBG("start: %08x", crtc_mask);
> > > >>> > - priv->pending_crtcs |= crtc_mask;
> > > >>> > - }
> > > >>> > - spin_unlock(&priv->pending_crtcs_event.lock);
> > > >>> > -
> > > >>> > - return ret;
> > > >>> > -}
> > > >>> > -
> > > >>> > -/* clear specified crtcs (no longer pending update)
> > > >>> > - */
> > > >>> > -static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
> > > >>> > -{
> > > >>> > - spin_lock(&priv->pending_crtcs_event.lock);
> > > >>> > - DBG("end: %08x", crtc_mask);
> > > >>> > - priv->pending_crtcs &= ~crtc_mask;
> > > >>> > - wake_up_all_locked(&priv->pending_crtcs_event);
> > > >>> > - spin_unlock(&priv->pending_crtcs_event.lock);
> > > >>> > -}
> > > >>> > -
> > > >>> > -static void commit_destroy(struct msm_commit *c)
> > > >>> > -{
> > > >>> > - end_atomic(c->dev->dev_private, c->crtc_mask);
> > > >>> > - if (c->nonblock)
> > > >>> > - kfree(c);
> > > >>> > -}
> > > >>> > -
> > > >>> > static void msm_atomic_wait_for_commit_done(
> > > >>> > struct drm_device *dev,
> > > >>> > struct drm_atomic_state *old_state)
> > > >>> > @@ -118,6 +73,10 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
> > > >>> >
> > > >>> > msm_atomic_wait_for_commit_done(dev, state);
> > > >>> >
> > > >>> > + drm_atomic_helper_commit_hw_done(state);
> > > >>> > +
> > > >>> > + drm_atomic_helper_wait_for_vblanks(dev, state);
> > > >>> > +
> > > >>> > drm_atomic_helper_cleanup_planes(dev, state);
> > > >>> >
> > > >>> > kms->funcs->complete_commit(kms, state);
> > > >>> > @@ -126,109 +85,25 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
> > > >>> > /* The (potentially) asynchronous part of the commit. At this point
> > > >>> > * nothing can fail short of armageddon.
> > > >>> > */
> > > >>> > -static void complete_commit(struct msm_commit *c)
> > > >>> > +static void commit_tail(struct drm_atomic_state *state)
> > > >>> > {
> > > >>> > - struct drm_atomic_state *state = c->state;
> > > >>> > - struct drm_device *dev = state->dev;
> > > >>> > + drm_atomic_helper_wait_for_fences(state->dev, state, false);
> > > >>> >
> > > >>> > - drm_atomic_helper_wait_for_fences(dev, state, false);
> > > >>> > + drm_atomic_helper_wait_for_dependencies(state);
> > > >>> >
> > > >>> > msm_atomic_commit_tail(state);
> > > >>> >
> > > >>> > - drm_atomic_state_put(state);
> > > >>> > -}
> > > >>> > -
> > > >>> > -static void _msm_drm_commit_work_cb(struct kthread_work *work)
> > > >>> > -{
> > > >>> > - struct msm_commit *commit = NULL;
> > > >>> > -
> > > >>> > - if (!work) {
> > > >>> > - DRM_ERROR("%s: Invalid commit work data!\n", __func__);
> > > >>> > - return;
> > > >>> > - }
> > > >>> > -
> > > >>> > - commit = container_of(work, struct msm_commit, commit_work);
> > > >>> > -
> > > >>> > - complete_commit(commit);
> > > >>> > -
> > > >>> > - commit_destroy(commit);
> > > >>> > -}
> > > >>> > -
> > > >>> > -static struct msm_commit *commit_init(struct drm_atomic_state *state,
> > > >>> > - bool nonblock)
> > > >>> > -{
> > > >>> > - struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
> > > >>> > + drm_atomic_helper_commit_cleanup_done(state);
> > > >>> >
> > > >>> > - if (!c)
> > > >>> > - return NULL;
> > > >>> > -
> > > >>> > - c->dev = state->dev;
> > > >>> > - c->state = state;
> > > >>> > - c->nonblock = nonblock;
> > > >>> > -
> > > >>> > - kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);
> > > >>> > -
> > > >>> > - return c;
> > > >>> > + drm_atomic_state_put(state);
> > > >>> > }
> > > >>> >
> > > >>> > -/* Start display thread function */
> > > >>> > -static void msm_atomic_commit_dispatch(struct drm_device *dev,
> > > >>> > - struct drm_atomic_state *state, struct msm_commit *commit)
> > > >>> > +static void commit_work(struct work_struct *work)
> > > >>> > {
> > > >>> > - struct msm_drm_private *priv = dev->dev_private;
> > > >>> > - struct drm_crtc *crtc = NULL;
> > > >>> > - struct drm_crtc_state *new_crtc_state = NULL;
> > > >>> > - int ret = -EINVAL, i = 0, j = 0;
> > > >>> > - bool nonblock;
> > > >>> > -
> > > >>> > - /* cache since work will kfree commit in non-blocking case */
> > > >>> > - nonblock = commit->nonblock;
> > > >>> > -
> > > >>> > - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
> > > >>> > - for (j = 0; j < priv->num_crtcs; j++) {
> > > >>> > - if (priv->disp_thread[j].crtc_id ==
> > > >>> > - crtc->base.id) {
> > > >>> > - if (priv->disp_thread[j].thread) {
> > > >>> > - kthread_queue_work(
> > > >>> > - &priv->disp_thread[j].worker,
> > > >>> > - &commit->commit_work);
> > > >>> Are there any known proposals floating around to support ASYNC commits for
> > > >>> concurrent displays rendering at different FPS? The above kthread model was
> > > >>> introduced when we hit performance roadblocks where one display has to wait
> > > >>> for an ongoing commit on another display to complete.
> > > >>
> > > >>
> > > >> I think people have discussed it, I'm not sure if there are any patches
> > > >> floating around. On the surface, it seems easy to just push the commit_work
> > > >> into the crtc commit and have one work item per crtc. However I think the
> > > >> problem is that there could be resources switching between crtcs for a given
> > > >> commit, or from one commit to the next, and synchronizing that becomes a
> > > >> Hard Problem.
> > > >>
> > > >> Perhaps I'm misunderstanding, but the start/end atomic functions serialize
> > > >> the incoming commits, right? So the only benefit the kthread provides is to
> > > >> mitigate any blocking calls made on one crtc from blocking a second crtc in
> > > >> the same commit?
> > > >>
> > > >> Sean
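
To sketch what "one work item per crtc" could look like, here's a rough,
hypothetical illustration only (none of these names exist in the driver, and
the hard part is hidden in the comment):

struct crtc_commit_work {
	struct drm_crtc *crtc;
	struct drm_atomic_state *state;
	struct work_struct work;
};

static void crtc_commit_work_fn(struct work_struct *work)
{
	struct crtc_commit_work *c = container_of(work,
			struct crtc_commit_work, work);

	/*
	 * This is where it gets hard: we may only touch the parts of
	 * c->state that belong to c->crtc, and anything shared with
	 * another crtc (hwpipes, global driver state) still needs
	 * cross-crtc synchronization before we program the hw.
	 */

	/* ... push c->crtc's portion of the new state to the hw ... */

	drm_atomic_state_put(c->state);
	kfree(c);
}
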
> > > >
> > > > I am not sure what level of resource (I assume hw blocks) switching we can
> > > > expect between two active CRTCs on successive commits. With virtualization in
> > > > play, the resources allocated to a CRTC / encoder / connector remain attached
> > > > to those components as long as the display is active. Planes (HW pipes) are
> > > > one entity that can move between CRTCs frequently. Even with planes, though,
> > > > the hw assignment should remain valid until the plane is detached from one
> > > > CRTC before being attached to the next.
> > >
> > > I think a good example to think about is virtualizing hwpipe<->plane
> > > mapping, for example, when you need to re-assign hwpipes for the next
> > > frame according to what capabilities are needed (yuv, scaling) or gang
> > > up two hwpipes for wide buffer scanout (or re-use one hwpipe for two
> > > planes).. in mdp5 we keep track of what hwpipes are in use by which
> > > planes in driver global atomic state (I have some revived patches from
> > > architt to convert this to driver private objs)..
> > >
> > > With the atomic model we have serialization in the (to abuse some
> > > terms) "top half" (ie. userspace calling into ioctl) by virtue of the
> > > modeset locks. But there is a second half to this. Because we update
> > > the new incoming global state object about what hwpipes are released
> > > in atomic_check, if the incoming state is committed/swapped in the
> > > "top half" we rely on this state being valid in the "bottom half" (in
> > > this case the wq) before pushing the new state to the hw.. which
> > > essentially forces us to serialize committing the state in the "bottom
> > > half" so things happen in the same order as the "top half" intended.
> > > Otherwise we could try to assign a hwpipe to a different crtc while it
> > > is still scanning out for its previous crtc. If you have a 30Hz
> > > display plus a 120Hz display, I guess this is a bit sub-optimal.
> >
> > It's still bad even with displays refreshing at the same rate: the worst case
> > could halve the refresh rate if the vblanks aren't in sync, since a commit for
> > the second crtc that gets serialized behind the first crtc's flip can miss its
> > own vblank and wait an extra full frame.
> >
> > >
> > > Maybe the answer is per-crtc wq's for commits plus some sort of
> > > fencing scheme to stall things when there are cross-crtc dependencies
> > > (ie. commit on crtc B depends on hwpipe released by commit on crtc A)?
> > > This way we don't block updates on the faster display when it isn't
> > > required. I'm not quite sure how that would work. But perhaps at
> > > least we could somehow allow out-of-order commits when two updates
> > > don't touch any of the same state obj's.
> >
> > Right, for "normal" updates, you could use the fast path. We already have
> > needs_modeset, so adding needs_sync wouldn't be too bad. I think I ran into
> > this with tegra back in Pixel C days and hacked in a per-crtc worker since the
> > resources were statically mapped. We'll probably run into this in CrOS as we
> > expand our explicit sync support in the compositor.
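
Roughly what I have in mind for needs_sync, as a hypothetical sketch (neither
needs_sync nor per-crtc commit workqueues exist today, and multi-crtc states
would still have to take the slow path):

static void msm_queue_commit(struct msm_drm_private *priv,
			     struct drm_atomic_state *state,
			     struct drm_crtc *crtc, bool needs_sync)
{
	int i;

	/*
	 * Commits that re-assign shared resources (hwpipes, global
	 * driver state) wait for every in-flight per-crtc commit
	 * first, so they stay ordered against all of them.
	 */
	if (needs_sync)
		for (i = 0; i < priv->num_crtcs; i++)
			flush_workqueue(priv->crtc_commit_wq[i]);

	/*
	 * Hypothetical per-crtc ordered workqueue: "normal" updates
	 * only serialize against other commits on the same crtc, so a
	 * slow 30Hz display doesn't hold back a 120Hz one.
	 */
	queue_work(priv->crtc_commit_wq[drm_crtc_index(crtc)],
		   &state->commit_work);
}
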
> >
> > >
> > > (But I do think this is something we should discuss on dri-devel, and
> > > I'd prefer to solve this in the atomic helpers, rather than having
> > > userspace work around different sets of bugs/quirks for each different
> > > driver. It really isn't something that is hw specific, so it doesn't
> > > belong in the driver.)
> >
> > Agreed, this is a tricky bit of code and it would benefit from being used
> > across all drivers (as well as all drivers benefiting from it). At any rate,
> > I can't use multi-display on my development device right now, so I think
> > we'll need to defer for now.
> >
> > Sean
> >
> > >
> > > BR,
> > > -R
> > >
> > >
> > > > "Start atomic" synchronizes the commit cycle for all the CRTCs by waiting
> > > > for commit completion of all the previous frames. But per-crtc kthreads
> > > > allow the current frame's commits to happen independently. For Android, we
> > > > needed this model because each commit thread needs to wait for its input
> > > > plane fences before programming the hardware.
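
For reference, a simplified sketch of what the removed per-display kthread path
boiled down to (not the literal code being deleted; struct msm_commit,
commit_destroy() and msm_atomic_commit_tail() are from that code):

static void crtc_thread_commit(struct kthread_work *work)
{
	struct msm_commit *c = container_of(work, struct msm_commit,
					    commit_work);

	/* blocks only this display's kthread, not the others */
	drm_atomic_helper_wait_for_fences(c->dev, c->state, false);

	msm_atomic_commit_tail(c->state);

	drm_atomic_state_put(c->state);
	commit_destroy(c);
}
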
> > > >
> > > > Jeykumar S.
> > > >
> > > >>
> > > >>> > - /* only return zero if work is
> > > >>> > - * queued successfully.
> > > >>> > - */
> > > >>> > - ret = 0;
> > > >>> > - } else {
> > > >>> > - DRM_ERROR(" Error for crtc_id: %d\n",
> > > >>> > - priv->disp_thread[j].crtc_id);
> > > >>> > - }
> > > >>> > - break;
> > > >>> > - }
> > > >>> > - }
>
> Care to remove priv->disp_thread and all its references as a part of this
> change?
Definitely! Will revise.
Sean
>
> - Jeykumar S
> > > >>> > - /*
> > > >>> > - * TODO: handle cases where there will be more than
> > > >>> > - * one crtc per commit cycle. Remove this check then.
> > > >>> > - * Current assumption is there will be only one crtc
> > > >>> > - * per commit cycle.
> > > >>> > - */
> > > >>> > - if (j < priv->num_crtcs)
> > > >>> > - break;
> > > >>> > - }
> > > >>> > -
> > > >>> > - if (ret) {
> > > >>> > - /**
> > > >>> > - * this is not expected to happen, but at this point the state
> > > >>> > - * has been swapped, but we couldn't dispatch to a crtc thread.
> > > >>> > - * fallback now to a synchronous complete_commit to try and
> > > >>> > - * ensure that SW and HW state don't get out of sync.
> > > >>> > - */
> > > >>> > - DRM_ERROR("failed to dispatch commit to any CRTC\n");
> > > >>> > - complete_commit(commit);
> > > >>> > - } else if (!nonblock) {
> > > >>> > - kthread_flush_work(&commit->commit_work);
> > > >>> > - }
> > > >>> > -
> > > >>> > - /* free nonblocking commits in this context, after processing */
> > > >>> > - if (!nonblock)
> > > >>> > - kfree(commit);
> > > >>> > + struct drm_atomic_state *state = container_of(work,
> > > >>> > + struct drm_atomic_state,
> > > >>> > + commit_work);
> > > >>> > + commit_tail(state);
> > > >>> > }
> > > >>> >
> > > >>> > /**
> > > >>> > @@ -247,17 +122,12 @@ int msm_atomic_commit(struct drm_device *dev,
> > > >>> > struct drm_atomic_state *state, bool nonblock)
> > > >>> > {
> > > >>> > struct msm_drm_private *priv = dev->dev_private;
> > > >>> > - struct msm_commit *c;
> > > >>> > struct drm_crtc *crtc;
> > > >>> > struct drm_crtc_state *crtc_state;
> > > >>> > struct drm_plane *plane;
> > > >>> > struct drm_plane_state *old_plane_state, *new_plane_state;
> > > >>> > int i, ret;
> > > >>> >
> > > >>> > - ret = drm_atomic_helper_prepare_planes(dev, state);
> > > >>> > - if (ret)
> > > >>> > - return ret;
> > > >>> > -
> > > >>> > /*
> > > >>> > * Note that plane->atomic_async_check() should fail if we need
> > > >>> > * to re-assign hwpipe or anything that touches global atomic
> > > >>> > @@ -265,32 +135,30 @@ int msm_atomic_commit(struct drm_device *dev,
> > > >>> > * cases.
> > > >>> > */
> > > >>> > if (state->async_update) {
> > > >>> > + ret = drm_atomic_helper_prepare_planes(dev, state);
> > > >>> > + if (ret)
> > > >>> > + return ret;
> > > >>> > +
> > > >>> > drm_atomic_helper_async_commit(dev, state);
> > > >>> > drm_atomic_helper_cleanup_planes(dev, state);
> > > >>> > return 0;
> > > >>> > }
> > > >>> >
> > > >>> > - c = commit_init(state, nonblock);
> > > >>> > - if (!c) {
> > > >>> > - ret = -ENOMEM;
> > > >>> > - goto error;
> > > >>> > - }
> > > >>> > + ret = drm_atomic_helper_setup_commit(state, nonblock);
> > > >>> > + if (ret)
> > > >>> > + return ret;
> > > >>> >
> > > >>> > - /*
> > > >>> > - * Figure out what crtcs we have:
> > > >>> > - */
> > > >>> > - for_each_new_crtc_in_state(state, crtc, crtc_state, i)
> > > >>> > - c->crtc_mask |= drm_crtc_mask(crtc);
> > > >>> > + INIT_WORK(&state->commit_work, commit_work);
> > > >>> >
> > > >>> > - /*
> > > >>> > - * Wait for pending updates on any of the same crtc's and then
> > > >>> > - * mark our set of crtc's as busy:
> > > >>> > - */
> > > >>> > - ret = start_atomic(dev->dev_private, c->crtc_mask);
> > > >>> > + ret = drm_atomic_helper_prepare_planes(dev, state);
> > > >>> > if (ret)
> > > >>> > - goto err_free;
> > > >>> > + return ret;
> > > >>> >
> > > >>> > - BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
> > > >>> > + if (!nonblock) {
> > > >>> > + ret = drm_atomic_helper_wait_for_fences(dev, state, true);
> > > >>> > + if (ret)
> > > >>> > + goto error;
> > > >>> > + }
> > > >>> >
> > > >>> > /*
> > > >>> > * This is the point of no return - everything below never fails except
> > > >>> > @@ -299,6 +167,8 @@ int msm_atomic_commit(struct drm_device *dev,
> > > >>> > *
> > > >>> > * swap driver private state while still holding state_lock
> > > >>> > */
> > > >>> > + BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
> > > >>> > +
> > > >>> > if (to_kms_state(state)->state)
> > > >>> > priv->kms->funcs->swap_state(priv->kms, state);
> > > >>> >
> > > >>> > @@ -329,12 +199,13 @@ int msm_atomic_commit(struct drm_device *dev,
> > > >>> > */
> > > >>> >
> > > >>> > drm_atomic_state_get(state);
> > > >>> > - msm_atomic_commit_dispatch(dev, state, c);
> > > >>> > + if (nonblock)
> > > >>> > + queue_work(system_unbound_wq, &state->commit_work);
> > > >>> > + else
> > > >>> > + commit_tail(state);
> > > >>> >
> > > >>> > return 0;
> > > >>> >
> > > >>> > -err_free:
> > > >>> > - kfree(c);
> > > >>> > error:
> > > >>> > drm_atomic_helper_cleanup_planes(dev, state);
> > > >>> > return ret;
> > > >>> > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> > > >>> > index eda4a2340f93..b354424cccb5 100644
> > > >>> > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > >>> > +++ b/drivers/gpu/drm/msm/msm_drv.c
> > > >>> > @@ -549,7 +549,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
> > > >>> > goto mdss_init_fail;
> > > >>> >
> > > >>> > priv->wq = alloc_ordered_workqueue("msm_drm", 0);
> > > >>> > - init_waitqueue_head(&priv->pending_crtcs_event);
> > > >>> >
> > > >>> > INIT_LIST_HEAD(&priv->client_event_list);
> > > >>> > INIT_LIST_HEAD(&priv->inactive_list);
> > > >>> > diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> > > >>> > index cf96a85f4b55..292496b682e8 100644
> > > >>> > --- a/drivers/gpu/drm/msm/msm_drv.h
> > > >>> > +++ b/drivers/gpu/drm/msm/msm_drv.h
> > > >>> > @@ -536,10 +536,6 @@ struct msm_drm_private {
> > > >>> >
> > > >>> > struct workqueue_struct *wq;
> > > >>> >
> > > >>> > - /* crtcs pending async atomic updates: */
> > > >>> > - uint32_t pending_crtcs;
> > > >>> > - wait_queue_head_t pending_crtcs_event;
> > > >>> > -
> > > >>> > unsigned int num_planes;
> > > >>> > struct drm_plane *planes[MAX_PLANES];
>
> --
> Jeykumar S
--
Sean Paul, Software Engineer, Google / Chromium OS