[Intel-gfx] [PATCH] drm/i915: Rework FBC schedule locking

Rodrigo Vivi rodrigo.vivi at intel.com
Wed Mar 21 17:55:33 UTC 2018


On Fri, Mar 16, 2018 at 04:01:21PM +0100, Maarten Lankhorst wrote:
> Instead of taking fbc->lock inside the worker, don't take any lock
> and cancel the work synchronously to prevent races. Since the worker
> waits for a vblank before activating, wake up all vblank waiters after
> signalling the cancel through unsetting work->scheduled_vblank. This
> will make sure that we don't wait too long when cancelling the activation.
> 
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
> Cc: Paulo Zanoni <paulo.r.zanoni at intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi at intel.com>

Overall I'm not comfortable with removing the locks on fbc activate,
so I would defer this to Paulo.
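
That said, let me restate the handshake to make sure I'm reading it
right: the worker only activates if scheduled_vblank is still non-zero
when it wakes up, and the cancel side (intel_fbc_cancel_activation()
below) boils down to:

    atomic64_set(&fbc->work.scheduled_vblank, 0);  /* tell the worker to bail */
    wake_up_all(&vblank->queue);                   /* kick it out of its wait */
    cancel_work_sync(&fbc->work.work);             /* and wait for it to finish */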

Also, my own experience with atomic variables and vblank counters
wasn't very positive in the PSR+DMC case. I always end up finding
a corner case :(
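
Here, for example, the worker latches vbl once and keeps using it
after the wait returns:

    wait_event_timeout(vblank->queue,
        !(vbl = atomic64_read(&work->scheduled_vblank)) ||
        vbl != drm_crtc_vblank_count(&crtc->base),
        msecs_to_jiffies(50));

    if (vbl)    /* vbl may be stale here if a cancel raced in */
        intel_fbc_hw_activate(dev_priv);

I'm not claiming it's actually broken here (cancel_work_sync() still
serializes against the caller), it's just the kind of window I keep
tripping over.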

Anyway, a few dumb questions below...

> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103167
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c |  4 +-
>  drivers/gpu/drm/i915/i915_drv.h     |  3 +-
>  drivers/gpu/drm/i915/intel_fbc.c    | 76 ++++++++++++++++---------------------
>  3 files changed, 36 insertions(+), 47 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 98e169636f86..cbf5504de183 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -1643,9 +1643,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
>  	else
>  		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
>  
> -	if (fbc->work.scheduled)
> +	if (work_busy(&fbc->work.work))
>  		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
> -			   fbc->work.scheduled_vblank,
> +			   (u64)atomic64_read(&fbc->work.scheduled_vblank),
>  			   drm_crtc_vblank_count(&fbc->crtc->base));
>  
>  	if (intel_fbc_is_active(dev_priv)) {
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 9262cfb8aac2..a7a1b0453320 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -558,8 +558,7 @@ struct intel_fbc {
>  	} params;
>  
>  	struct intel_fbc_work {
> -		bool scheduled;
> -		u64 scheduled_vblank;
> +		atomic64_t scheduled_vblank;

Why is FBC so tied to vblanks?

>  		struct work_struct work;
>  	} work;
>  
> diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
> index 707d49c12638..20d45bcb6b6b 100644
> --- a/drivers/gpu/drm/i915/intel_fbc.c
> +++ b/drivers/gpu/drm/i915/intel_fbc.c
> @@ -357,10 +357,6 @@ static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
>  
>  static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
>  {
> -	struct intel_fbc *fbc = &dev_priv->fbc;
> -
> -	fbc->active = true;
> -
>  	if (INTEL_GEN(dev_priv) >= 7)
>  		gen7_fbc_activate(dev_priv);
>  	else if (INTEL_GEN(dev_priv) >= 5)
> @@ -407,16 +403,13 @@ static void intel_fbc_work_fn(struct work_struct *__work)
>  	struct intel_fbc_work *work = &fbc->work;
>  	struct intel_crtc *crtc = fbc->crtc;
>  	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
> +	u64 vbl;

Should we use something more descriptive here, like vbl_counter,
instead of a "vbl" variable right before the "vblank" one?

>  
>  	if (drm_crtc_vblank_get(&crtc->base)) {
>  		/* CRTC is now off, leave FBC deactivated */
> -		mutex_lock(&fbc->lock);
> -		work->scheduled = false;
> -		mutex_unlock(&fbc->lock);
>  		return;
>  	}
>  
> -retry:
>  	/* Delay the actual enabling to let pageflipping cease and the
>  	 * display to settle before starting the compression. Note that
>  	 * this delay also serves a second purpose: it allows for a
> @@ -425,34 +418,35 @@ static void intel_fbc_work_fn(struct work_struct *__work)
>  	 *
>  	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb

Why not simply wait for one vblank below?
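i.e. something like (just to illustrate what I mean; it would lose
the early-cancel wakeup this patch relies on):

    /* hypothetical: a plain one-vblank wait instead of the timed wait */
    drm_crtc_wait_one_vblank(&crtc->base);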

>  	 *
> -	 * It is also worth mentioning that since work->scheduled_vblank can be
> -	 * updated multiple times by the other threads, hitting the timeout is
> -	 * not an error condition. We'll just end up hitting the "goto retry"
> -	 * case below.
> +	 * This may be killed by unsetting scheduled_vblank, in which
> +	 * case we return early without activating.
>  	 */
>  	wait_event_timeout(vblank->queue,
> -		drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
> +		!(vbl = atomic64_read(&work->scheduled_vblank)) || vbl != drm_crtc_vblank_count(&crtc->base),
>  		msecs_to_jiffies(50));
>  
> -	mutex_lock(&fbc->lock);
> +	if (vbl)
> +		intel_fbc_hw_activate(dev_priv);
>  
> -	/* Were we cancelled? */
> -	if (!work->scheduled)
> -		goto out;
> +	drm_crtc_vblank_put(&crtc->base);
> +}
>  
> -	/* Were we delayed again while this function was sleeping? */
> -	if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
> -		mutex_unlock(&fbc->lock);
> -		goto retry;
> -	}
> +static void intel_fbc_cancel_activation(struct drm_i915_private *dev_priv)
> +{
> +	struct intel_fbc *fbc = &dev_priv->fbc;
> +	struct intel_crtc *crtc;
> +	struct drm_vblank_crtc *vblank;
>  
> -	intel_fbc_hw_activate(dev_priv);
> +	if (!fbc->active)
> +		return;
>  
> -	work->scheduled = false;
> +	crtc = fbc->crtc;
> +	vblank = &dev_priv->drm.vblank[crtc->pipe];
>  
> -out:
> -	mutex_unlock(&fbc->lock);
> -	drm_crtc_vblank_put(&crtc->base);
> +	/* Kill FBC worker if it's waiting */
> +	atomic64_set(&fbc->work.scheduled_vblank, 0);
> +	wake_up_all(&vblank->queue);
> +	cancel_work_sync(&fbc->work.work);
>  }
>  
>  static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
> @@ -471,12 +465,10 @@ static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
>  		return;
>  	}
>  
> -	/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
> -	 * this function since we're not releasing fbc.lock, so it won't have an
> -	 * opportunity to grab it to discover that it was cancelled. So we just
> -	 * update the expected jiffy count. */
> -	work->scheduled = true;
> -	work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
> +	intel_fbc_cancel_activation(dev_priv);
> +	fbc->active = true;
> +
> +	atomic64_set(&work->scheduled_vblank, drm_crtc_accurate_vblank_count(&crtc->base));
>  	drm_crtc_vblank_put(&crtc->base);
>  
>  	schedule_work(&work->work);
> @@ -489,13 +481,11 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
>  
>  	WARN_ON(!mutex_is_locked(&fbc->lock));
>  
> -	/* Calling cancel_work() here won't help due to the fact that the work
> -	 * function grabs fbc->lock. Just set scheduled to false so the work
> -	 * function can know it was cancelled. */
> -	fbc->work.scheduled = false;
> +	if (fbc->active) {
> +		intel_fbc_cancel_activation(dev_priv);
>  
> -	if (fbc->active)
>  		intel_fbc_hw_deactivate(dev_priv);
> +	}
>  
>  	fbc->no_fbc_reason = reason;
>  }
> @@ -1222,11 +1212,11 @@ void intel_fbc_disable(struct intel_crtc *crtc)
>  	WARN_ON(crtc->active);
>  
>  	mutex_lock(&fbc->lock);
> +	intel_fbc_cancel_activation(dev_priv);
> +
>  	if (fbc->crtc == crtc)
>  		__intel_fbc_disable(dev_priv);
>  	mutex_unlock(&fbc->lock);
> -
> -	cancel_work_sync(&fbc->work.work);
>  }
>  
>  /**
> @@ -1243,13 +1233,13 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
>  		return;
>  
>  	mutex_lock(&fbc->lock);
> +	intel_fbc_cancel_activation(dev_priv);
> +
>  	if (fbc->enabled) {
>  		WARN_ON(fbc->crtc->active);
>  		__intel_fbc_disable(dev_priv);
>  	}
>  	mutex_unlock(&fbc->lock);
> -
> -	cancel_work_sync(&fbc->work.work);
>  }
>  
>  static void intel_fbc_underrun_work_fn(struct work_struct *work)
> @@ -1373,11 +1363,11 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
>  	struct intel_fbc *fbc = &dev_priv->fbc;
>  
>  	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
> +	atomic64_set(&fbc->work.scheduled_vblank, 0);
>  	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
>  	mutex_init(&fbc->lock);
>  	fbc->enabled = false;
>  	fbc->active = false;
> -	fbc->work.scheduled = false;
>  
>  	if (need_fbc_vtd_wa(dev_priv))
>  		mkwrite_device_info(dev_priv)->has_fbc = false;
> -- 
> 2.16.2
> 