[Intel-gfx] [PATCH 01/25] drm/i915/fbc: wait for a vblank instead of 50ms when enabling
Rodrigo Vivi
rodrigo.vivi at gmail.com
Tue Jan 26 09:44:44 PST 2016
On Thu, Jan 21, 2016 at 12:03 PM Paulo Zanoni <paulo.r.zanoni at intel.com>
wrote:
> Instead of waiting for 50ms, just wait until the next vblank, since
> it's the minimum requirement. The whole infrastructure of FBC is based
> on vblanks, so waiting for X vblanks instead of X milliseconds sounds
> like the correct way to go. Besides, 50ms may be less than a vblank on
> super slow modes that may or may not exist.
>
> There are some small improvements in PC state residency (due to the
> fact that we're now using 16ms for the common modes instead of 50ms),
> but the biggest advantage is still the correctness of being
> vblank-based instead of time-based.
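
Just to make sure I'm reading the core of the change right: it boils down to replacing the fixed sleep with a wait on the vblank queue, roughly like this (paraphrasing the hunks below in the context of intel_fbc_work_fn(), not the exact code):

    /* before: always sleep 50ms counted from when the work was scheduled */
    wait_remaining_ms_from_jiffies(work->enable_jiffies, 50);

    /* after: sample the vblank counter at schedule time and wait here until
     * it advances; the 50ms becomes just a safety timeout before retrying */
    wait_event_timeout(vblank->queue,
                       drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
                       msecs_to_jiffies(50));
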
>
> v2:
> - Rebase after changing the patch order.
> - Update the commit message.
> v3:
> - Fix bogus vblank_get() instead of vblank_count() (Ville).
> - Don't forget to call drm_crtc_vblank_{get,put} (Chris, Ville)
> - Adjust the performance details on the commit message.
> v4:
> - Don't grab the FBC mutex just to grab the vblank (Maarten)
>
> Signed-off-by: Paulo Zanoni <paulo.r.zanoni at intel.com>
> ---
> drivers/gpu/drm/i915/i915_drv.h  |  2 +-
> drivers/gpu/drm/i915/intel_fbc.c | 39 +++++++++++++++++++++++++++++----------
> 2 files changed, 30 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 204661f..d8f21f0 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -921,9 +921,9 @@ struct i915_fbc {
>
> struct intel_fbc_work {
> bool scheduled;
> + u32 scheduled_vblank;
> struct work_struct work;
> struct drm_framebuffer *fb;
> - unsigned long enable_jiffies;
> } work;
>
> const char *no_fbc_reason;
> diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
> index a1988a4..3993b43 100644
> --- a/drivers/gpu/drm/i915/intel_fbc.c
> +++ b/drivers/gpu/drm/i915/intel_fbc.c
> @@ -381,7 +381,17 @@ static void intel_fbc_work_fn(struct work_struct *__work)
> container_of(__work, struct drm_i915_private, fbc.work.work);
> struct intel_fbc_work *work = &dev_priv->fbc.work;
> struct intel_crtc *crtc = dev_priv->fbc.crtc;
> - int delay_ms = 50;
> + struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
> +
> + if (drm_crtc_vblank_get(&crtc->base)) {
> + DRM_ERROR("vblank not available for FBC on pipe %c\n",
> + pipe_name(crtc->pipe));
> +
> + mutex_lock(&dev_priv->fbc.lock);
> + work->scheduled = false;

I couldn't understand this part... it doesn't look related to the s/time/vblank change... could you please explain?

> + mutex_unlock(&dev_priv->fbc.lock);
> + return;
> + }
>
> retry:
> /* Delay the actual enabling to let pageflipping cease and the
> @@ -390,14 +400,16 @@ retry:
> * vblank to pass after disabling the FBC before we attempt
> * to modify the control registers.
> *
> - * A more complicated solution would involve tracking vblanks
> - * following the termination of the page-flipping sequence
> - * and indeed performing the enable as a co-routine and not
> - * waiting synchronously upon the vblank.
> - *
> * WaFbcWaitForVBlankBeforeEnable:ilk,snb
>
hm... is this still valid for newer platforms, or should we put an "if gen <= 6" around these checks?
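
Something like the sketch below is what I have in mind, inside intel_fbc_work_fn(). Just illustrating the question (and assuming INTEL_INFO(dev_priv)->gen is the right thing to look at); the wait may well still be wanted on all platforms for the pageflip-settling reason above:

    /* hypothetical: if the vblank wait were only about
     * WaFbcWaitForVBlankBeforeEnable, it could be limited to ilk/snb */
    if (INTEL_INFO(dev_priv)->gen <= 6)
            wait_event_timeout(vblank->queue,
                               drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
                               msecs_to_jiffies(50));
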
> + *
> + * It is also worth mentioning that since work->scheduled_vblank can be
> + * updated multiple times by the other threads, hitting the timeout is
> + * not an error condition. We'll just end up hitting the "goto retry"
> + * case below.
> */
> - wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
> + wait_event_timeout(vblank->queue,
> + drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
> + msecs_to_jiffies(50));
>
> mutex_lock(&dev_priv->fbc.lock);
>
> @@ -406,8 +418,7 @@ retry:
> goto out;
>
> /* Were we delayed again while this function was sleeping? */
> - if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
> - jiffies)) {
> + if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
> mutex_unlock(&dev_priv->fbc.lock);
> goto retry;
> }
> @@ -419,6 +430,7 @@ retry:
>
> out:
> mutex_unlock(&dev_priv->fbc.lock);
> + drm_crtc_vblank_put(&crtc->base);
> }
>
> static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
> @@ -434,13 +446,20 @@ static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
>
> WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
>
> + if (drm_crtc_vblank_get(&crtc->base)) {
> + DRM_ERROR("vblank not available for FBC on pipe %c\n",
> + pipe_name(crtc->pipe));
> + return;
> + }
> +
> /* It is useless to call intel_fbc_cancel_work() in this function since
> * we're not releasing fbc.lock, so it won't have an opportunity to grab
> * it to discover that it was cancelled. So we just update the expected
> * jiffy count. */
> work->fb = crtc->base.primary->fb;
> work->scheduled = true;
> - work->enable_jiffies = jiffies;
> + work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
> + drm_crtc_vblank_put(&crtc->base);
>
> schedule_work(&work->work);
> }
> --
> 2.6.4
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
>