[Intel-gfx] [RFC v5] drm/i915/psr: Kill scheduled work for Core platforms.

Pandiyan, Dhinakaran dhinakaran.pandiyan at intel.com
Wed Mar 14 22:38:45 UTC 2018




On Wed, 2018-03-14 at 13:47 -0700, Rodrigo Vivi wrote:
> On Wed, Mar 14, 2018 at 01:24:13PM -0700, Pandiyan, Dhinakaran wrote:
> > On Tue, 2018-03-13 at 15:23 -0700, Rodrigo Vivi wrote:
> > > Immediate activation is actually not an issue from the
> > > HW perspective on core platforms that have HW tracking:
> > > the HW will wait a few identical idle frames before
> > > transitioning to the actual PSR active state anyway.
> > > 
> > > Note that this patch also removes the delayed activation
> > > on HSW and BDW introduced by commit 'd0ac896a477d
> > > ("drm/i915: Delay first PSR activation.")'. That delay was
> > > introduced to fix a blank screen on VLV/CHV and it also
> > > masked some frozen screens on other core platforms,
> > > probably the same ones we are now properly hunting and fixing.
> > > 
> > > Furthermore, once we stop using delayed activation on core
> > > platforms, follow-up patches will be able to use the
> > > available workarounds to make HW tracking properly exit
> > > PSR, instead of the big nuke of disabling and re-enabling
> > > PSR on exit and activate respectively, at least in a few
> > > reliable cases.
> > > 
> > > v2: (DK): Remove unnecessary WARN_ONs and make some other
> > > 	 VLV || CHV checks more readable.
> > > v3: Do it regardless of the timer rework.
> > > v4: (DK/CI): Add a VLV || CHV check when cancelling the work in psr_disable.
> > > v5: Kill the remaining items and fully rework the activation functions.
> > > 
> > > Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan at intel.com>
> > > Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> > > ---
> > >  drivers/gpu/drm/i915/i915_debugfs.c |  20 +++---
> > >  drivers/gpu/drm/i915/i915_drv.h     |   3 +-
> > >  drivers/gpu/drm/i915/intel_psr.c    | 134 ++++++++++++++----------------------
> > >  3 files changed, 65 insertions(+), 92 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> > > index 972014b2497d..7dfada863f9c 100644
> > > --- a/drivers/gpu/drm/i915/i915_debugfs.c
> > > +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> > > @@ -2567,15 +2567,14 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
> > >  	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
> > >  	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
> > >  		   dev_priv->psr.busy_frontbuffer_bits);
> > > -	seq_printf(m, "Re-enable work scheduled: %s\n",
> > > -		   yesno(work_busy(&dev_priv->psr.work.work)));
> > >  
> > > -	if (HAS_DDI(dev_priv)) {
> > > -		if (dev_priv->psr.psr2_support)
> > > -			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
> > > -		else
> > > -			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
> > > -	} else {
> > > +	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
> > > +		bool scheduled;
> > > +
> > > +		scheduled = work_busy(&dev_priv->psr.vlv_activate_work.work);
> > > +		seq_printf(m, "Re-enable work scheduled: %s\n",
> > > +			   yesno(scheduled));
> > > +
> > >  		for_each_pipe(dev_priv, pipe) {
> > >  			enum transcoder cpu_transcoder =
> > >  				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
> > > @@ -2594,6 +2593,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
> > >  
> > >  			intel_display_power_put(dev_priv, power_domain);
> > >  		}
> > > +	} else {
> > > +		if (dev_priv->psr.psr2_support)
> > > +			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
> > > +		else
> > > +			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
> > >  	}
> > >  
> > >  	seq_printf(m, "Main link in standby mode: %s\n",
> > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > > index 74b0e9d8ff62..409997f29e07 100644
> > > --- a/drivers/gpu/drm/i915/i915_drv.h
> > > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > > @@ -598,7 +598,7 @@ struct i915_psr {
> > >  	bool sink_support;
> > >  	struct intel_dp *enabled;
> > >  	bool active;
> > > -	struct delayed_work work;
> > > +	struct delayed_work vlv_activate_work;
> > >  	unsigned busy_frontbuffer_bits;
> > >  	bool psr2_support;
> > >  	bool aux_frame_sync;
> > > @@ -613,7 +613,6 @@ struct i915_psr {
> > >  	void (*disable_source)(struct intel_dp *,
> > >  			       const struct intel_crtc_state *);
> > >  	void (*enable_sink)(struct intel_dp *);
> > > -	void (*activate)(struct intel_dp *);
> > >  	void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
> > >  };
> > >  
> > > diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
> > > index 317cb4a12693..bc7b94a06417 100644
> > > --- a/drivers/gpu/drm/i915/intel_psr.c
> > > +++ b/drivers/gpu/drm/i915/intel_psr.c
> > > @@ -311,24 +311,6 @@ static void vlv_psr_enable_source(struct intel_dp *intel_dp,
> > >  		   VLV_EDP_PSR_ENABLE);
> > >  }
> > >  
> > > -static void vlv_psr_activate(struct intel_dp *intel_dp)
> > > -{
> > > -	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> > > -	struct drm_device *dev = dig_port->base.base.dev;
> > > -	struct drm_i915_private *dev_priv = to_i915(dev);
> > > -	struct drm_crtc *crtc = dig_port->base.base.crtc;
> > > -	enum pipe pipe = to_intel_crtc(crtc)->pipe;
> > > -
> > > -	/*
> > > -	 * Let's do the transition from PSR_state 1 (inactive) to
> > > -	 * PSR_state 2 (transition to active - static frame transmission).
> > > -	 * Then Hardware is responsible for the transition to
> > > -	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
> > > -	 */
> > > -	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
> > > -		   VLV_EDP_PSR_ACTIVE_ENTRY);
> > > -}
> > > -
> > >  static void hsw_activate_psr1(struct intel_dp *intel_dp)
> > >  {
> > >  	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
> > > @@ -434,16 +416,25 @@ static void hsw_psr_activate(struct intel_dp *intel_dp)
> > >  	struct drm_device *dev = dig_port->base.base.dev;
> > >  	struct drm_i915_private *dev_priv = to_i915(dev);
> > >  
> > > +	WARN_ON(dev_priv->psr.active);
> > > +	lockdep_assert_held(&dev_priv->psr.lock);
> > > +
> > >  	/* On HSW+ after we enable PSR on source it will activate it
> > >  	 * as soon as it match configure idle_frame count. So
> > >  	 * we just actually enable it here on activation time.
> > >  	 */
> > > +	if (dev_priv->psr.psr2_support)
> > > +		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
> > > +	else
> > > +		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
> > >  
> > >  	/* psr1 and psr2 are mutually exclusive.*/
> > >  	if (dev_priv->psr.psr2_support)
> > >  		hsw_activate_psr2(intel_dp);
> > >  	else
> > >  		hsw_activate_psr1(intel_dp);
> > 
> > nit: The branches can be grouped together.
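
For reference, a quick untested sketch of the grouping I had in mind, just
folding the WARN_ONs into the existing psr1/psr2 branches from this patch:

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_support) {
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		hsw_activate_psr2(intel_dp);
	} else {
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
		hsw_activate_psr1(intel_dp);
	}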
> > 
> > > +
> > > +	dev_priv->psr.active = true;
> > >  }
> > >  
> > >  static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
> > > @@ -568,15 +559,20 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
> > >  	struct drm_device *dev = intel_dig_port->base.base.dev;
> > >  	struct drm_i915_private *dev_priv = to_i915(dev);
> > >  
> > > -	if (dev_priv->psr.psr2_support)
> > > -		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
> > > -	else
> > > -		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
> > > -	WARN_ON(dev_priv->psr.active);
> > > -	lockdep_assert_held(&dev_priv->psr.lock);
> > > +	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
> > > +		/*
> > > +		 * FIXME: VLV/CHV has many problems, but worst being blank
> > > +		 * screens on first activation. Let's pick the worst case
> > > +		 * delay for now.
> > > +		 */
> > > +		int delay = 1500;
> > >  
> > > -	dev_priv->psr.activate(intel_dp);
> > > -	dev_priv->psr.active = true;
> > > +		if (!work_busy(&dev_priv->psr.vlv_activate_work.work))
> > 
> > Since the work is already cancelled, do we need this?
> 
> If we are for sure canceling everywhere, you are right; we could remove this.
> > 
> > The patch looks good to me, fewer indirect calls and easier to read.
> > 
> > 
> > > +			schedule_delayed_work(&dev_priv->psr.vlv_activate_work,
> > > +					      msecs_to_jiffies(delay));
> > > +	} else {
> > > +		hsw_psr_activate(intel_dp);
> > > +	}
> > >  }
> > >  
> > >  static void hsw_psr_enable_source(struct intel_dp *intel_dp,
> > > @@ -652,22 +648,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
> > >  	dev_priv->psr.enable_source(intel_dp, crtc_state);
> > >  	dev_priv->psr.enabled = intel_dp;
> > >  
> > > -	if (INTEL_GEN(dev_priv) >= 9) {
> > > -		intel_psr_activate(intel_dp);
> > > -	} else {
> > > -		/*
> > > -		 * FIXME: Activation should happen immediately since this
> > > -		 * function is just called after pipe is fully trained and
> > > -		 * enabled.
> > > -		 * However on some platforms we face issues when first
> > > -		 * activation follows a modeset so quickly.
> > > -		 *     - On VLV/CHV we get bank screen on first activation
> > > -		 *     - On HSW/BDW we get a recoverable frozen screen until
> > > -		 *       next exit-activate sequence.
> > > -		 */
> > > -		schedule_delayed_work(&dev_priv->psr.work,
> > > -				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
> > > -	}
> > > +	intel_psr_activate(intel_dp);
> > >  
> > >  unlock:
> > >  	mutex_unlock(&dev_priv->psr.lock);
> > > @@ -786,13 +767,14 @@ void intel_psr_disable(struct intel_dp *intel_dp,
> > >  	dev_priv->psr.enabled = NULL;
> > >  	mutex_unlock(&dev_priv->psr.lock);
> > >  
> > > -	cancel_delayed_work_sync(&dev_priv->psr.work);
> > > +	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> > > +		cancel_delayed_work_sync(&dev_priv->psr.vlv_activate_work);
> > >  }
> > >  
> > > -static void intel_psr_work(struct work_struct *work)
> > > +static void vlv_psr_activate_work(struct work_struct *work)
> > >  {
> > >  	struct drm_i915_private *dev_priv =
> > > -		container_of(work, typeof(*dev_priv), psr.work.work);
> > > +	      container_of(work, typeof(*dev_priv), psr.vlv_activate_work.work);
> > >  	struct intel_dp *intel_dp = dev_priv->psr.enabled;
> > >  	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
> > >  	enum pipe pipe = to_intel_crtc(crtc)->pipe;
> > > @@ -802,35 +784,13 @@ static void intel_psr_work(struct work_struct *work)
> > >  	 * PSR might take some time to get fully disabled
> > >  	 * and be ready for re-enable.
> > >  	 */
> > > -	if (HAS_DDI(dev_priv)) {
> > > -		if (dev_priv->psr.psr2_support) {
> > > -			if (intel_wait_for_register(dev_priv,
> > > -						    EDP_PSR2_STATUS,
> > > -						    EDP_PSR2_STATUS_STATE_MASK,
> > > -						    0,
> > > -						    50)) {
> > > -				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
> > > -				return;
> > > -			}
> > > -		} else {
> > > -			if (intel_wait_for_register(dev_priv,
> > > -						    EDP_PSR_STATUS,
> > > -						    EDP_PSR_STATUS_STATE_MASK,
> > > -						    0,
> > > -						    50)) {
> > > -				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
> > > -				return;
> > > -			}
> > > -		}
> > > -	} else {
> > > -		if (intel_wait_for_register(dev_priv,
> > > -					    VLV_PSRSTAT(pipe),
> > > -					    VLV_EDP_PSR_IN_TRANS,
> > > -					    0,
> > > -					    1)) {
> > > -			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
> > > -			return;
> > > -		}
> > > +	if (intel_wait_for_register(dev_priv,
> > > +				    VLV_PSRSTAT(pipe),
> > > +				    VLV_EDP_PSR_IN_TRANS,
> > > +				    0,
> > > +				    1)) {
> > > +		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
> > > +		return;
> > >  	}
> > >  	mutex_lock(&dev_priv->psr.lock);
> > >  	intel_dp = dev_priv->psr.enabled;
> > > @@ -846,8 +806,17 @@ static void intel_psr_work(struct work_struct *work)
> > >  	if (dev_priv->psr.busy_frontbuffer_bits)
> > >  		goto unlock;
> > >  
> > > -	intel_psr_activate(intel_dp);
> > > -unlock:
> > > +	/*
> > > +	 * Let's do the transition from PSR_state 1 (inactive) to
> > > +	 * PSR_state 2 (transition to active - static frame transmission).
> > > +	 * Then Hardware is responsible for the transition to
> > > +	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
> > > +	 */
> > > +	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
> > > +		   VLV_EDP_PSR_ACTIVE_ENTRY);
> > > +	dev_priv->psr.active = true;
> > > +
> > > + unlock:
> > >  	mutex_unlock(&dev_priv->psr.lock);
> > >  }
> > >  
> > > @@ -1021,6 +990,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
> > >  	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
> > >  		return;
> > >  
> > > +	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> > > +		cancel_delayed_work_sync(&dev_priv->psr.vlv_activate_work);
> > 
> > psr_invalidate() will also need a cancel_delayed_work_sync()
> 
> indeed...
> 
> > I wonder if
> > this should move to intel_psr_exit(), right before writing to
> > VLV_PSRCTL.
> 
> I'm afraid of calling it from inside the mutex-protected area, since it
> cancels and waits, and that wait might be stuck on the mutex_lock inside
> the work...
> 
> thoughts?

Waiting should be okay. If the work function is already executing and has
acquired the mutex, we'd be blocked waiting for that mutex before we ever
reach the cancel, so cancel_delayed_work_sync() will not involve any waiting.

But if the mutex is available to flush while the work function has already
started executing and hasn't acquired the mutex yet, I think we might
deadlock in cancel_delayed_work_sync(): it would wait for the work to
finish while the work waits for the mutex we hold. We can check what
lockdep says by making that change.
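
To make the concern concrete, here is a rough sketch (not a real patch,
and with the intel_psr_exit() body simplified/elided) of what moving the
cancel into intel_psr_exit() would look like, given that intel_psr_exit()
runs with psr.lock held:

	static void intel_psr_exit(struct drm_i915_private *dev_priv)
	{
		lockdep_assert_held(&dev_priv->psr.lock);

		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/*
			 * We hold psr.lock here. If vlv_psr_activate_work()
			 * has already started but not yet reached its
			 * mutex_lock(&dev_priv->psr.lock), this call waits
			 * for the work while the work waits for our mutex.
			 */
			cancel_delayed_work_sync(&dev_priv->psr.vlv_activate_work);

			/* ... existing VLV_PSRCTL() handling ... */
		}

		dev_priv->psr.active = false;
	}

In the other interleaving (the work already holds psr.lock), the caller of
intel_psr_exit() is still blocked in mutex_lock(), so by the time we get
here the work has finished and the cancel returns immediately.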

The way it is currently (and even after you make the change to include
cancel_work in invalidate) seems racy. If an invalidate follows a flush,
we can end up in two situations:

1)
   Invalidate              Flush
                           cancel_work()
   cancel_work()
                           exit()
                           schedule_work()
   exit()

2)
   Invalidate              Flush
                           cancel_work()
                           exit()
                           schedule_work()
   cancel_work()
   exit()

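For completeness, the invalidate-side counterpart I was referring to would
roughly mirror the flush hunk above. A sketch only; the surrounding
intel_psr_invalidate() body is paraphrased from the current code rather
than taken from this patch:

	void intel_psr_invalidate(struct drm_i915_private *dev_priv,
				  unsigned frontbuffer_bits)
	{
		/*
		 * Make sure a pending VLV/CHV activation cannot fire behind
		 * our back while we are forcing an exit below.
		 */
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			cancel_delayed_work_sync(&dev_priv->psr.vlv_activate_work);

		mutex_lock(&dev_priv->psr.lock);
		if (!dev_priv->psr.enabled) {
			mutex_unlock(&dev_priv->psr.lock);
			return;
		}

		/*
		 * ... existing invalidate logic: mask frontbuffer_bits to
		 * the PSR pipe, accumulate them in psr.busy_frontbuffer_bits,
		 * then force an exit ...
		 */
		intel_psr_exit(dev_priv);

		mutex_unlock(&dev_priv->psr.lock);
	}

But as the two interleavings above show, adding the cancel alone does not
make the invalidate/flush ordering well defined.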


> 
> > 
> > 
> > > +
> > >  	mutex_lock(&dev_priv->psr.lock);
> > >  	if (!dev_priv->psr.enabled) {
> > >  		mutex_unlock(&dev_priv->psr.lock);
> > > @@ -1053,9 +1025,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
> > >  	}
> > >  
> > >  	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
> > > -		if (!work_busy(&dev_priv->psr.work.work))
> > > -			schedule_delayed_work(&dev_priv->psr.work,
> > > -					      msecs_to_jiffies(100));
> > > +		intel_psr_activate(dev_priv->psr.enabled);
> > > +
> > >  	mutex_unlock(&dev_priv->psr.lock);
> > >  }
> > >  
> > > @@ -1102,21 +1073,20 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
> > >  		dev_priv->psr.link_standby = false;
> > >  	}
> > >  
> > > -	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
> > >  	mutex_init(&dev_priv->psr.lock);
> > >  
> > >  	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
> > > +		INIT_DELAYED_WORK(&dev_priv->psr.vlv_activate_work,
> > > +				  vlv_psr_activate_work);
> > >  		dev_priv->psr.enable_source = vlv_psr_enable_source;
> > >  		dev_priv->psr.disable_source = vlv_psr_disable;
> > >  		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
> > > -		dev_priv->psr.activate = vlv_psr_activate;
> > >  		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
> > >  	} else {
> > >  		dev_priv->psr.has_hw_tracking = true;
> > >  		dev_priv->psr.enable_source = hsw_psr_enable_source;
> > >  		dev_priv->psr.disable_source = hsw_psr_disable;
> > >  		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
> > 
> > We might be able to remove enable_sink also if my aux patch goes
> > through. enable_*sink* requiring platform-specific hooks is a bit
> > odd IMO.
> 
> cool!
> 
> > 
> > 
> > > -		dev_priv->psr.activate = hsw_psr_activate;
> > >  		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
> > >  	}
> > >  }

