[Intel-gfx] [PATCH 02/10] drm/i915: Move i915_gem_retire_work_handler

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Wed Jan 17 10:33:44 UTC 2018


On 15/01/2018 21:24, Chris Wilson wrote:
> In preparation for the next patch, move i915_gem_retire_work_handler()
> later to avoid a forward declaration.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_gem.c | 228 ++++++++++++++++++++--------------------
>   1 file changed, 114 insertions(+), 114 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 87937c4f9dff..a8840a514377 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -3310,120 +3310,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
>   	return true;
>   }
>   
> -static void
> -i915_gem_retire_work_handler(struct work_struct *work)
> -{
> -	struct drm_i915_private *dev_priv =
> -		container_of(work, typeof(*dev_priv), gt.retire_work.work);
> -	struct drm_device *dev = &dev_priv->drm;
> -
> -	/* Come back later if the device is busy... */
> -	if (mutex_trylock(&dev->struct_mutex)) {
> -		i915_gem_retire_requests(dev_priv);
> -		mutex_unlock(&dev->struct_mutex);
> -	}
> -
> -	/* Keep the retire handler running until we are finally idle.
> -	 * We do not need to do this test under locking as in the worst-case
> -	 * we queue the retire worker once too often.
> -	 */
> -	if (READ_ONCE(dev_priv->gt.awake)) {
> -		i915_queue_hangcheck(dev_priv);
> -		queue_delayed_work(dev_priv->wq,
> -				   &dev_priv->gt.retire_work,
> -				   round_jiffies_up_relative(HZ));
> -	}
> -}
> -
> -static inline bool
> -new_requests_since_last_retire(const struct drm_i915_private *i915)
> -{
> -	return (READ_ONCE(i915->gt.active_requests) ||
> -		work_pending(&i915->gt.idle_work.work));
> -}
> -
> -static void
> -i915_gem_idle_work_handler(struct work_struct *work)
> -{
> -	struct drm_i915_private *dev_priv =
> -		container_of(work, typeof(*dev_priv), gt.idle_work.work);
> -	bool rearm_hangcheck;
> -	ktime_t end;
> -
> -	if (!READ_ONCE(dev_priv->gt.awake))
> -		return;
> -
> -	/*
> -	 * Wait for the last execlists context to complete, but bail out in
> -	 * case a new request is submitted.
> -	 */
> -	end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
> -	do {
> -		if (new_requests_since_last_retire(dev_priv))
> -			return;
> -
> -		if (intel_engines_are_idle(dev_priv))
> -			break;
> -
> -		usleep_range(100, 500);
> -	} while (ktime_before(ktime_get(), end));
> -
> -	rearm_hangcheck =
> -		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
> -
> -	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
> -		/* Currently busy, come back later */
> -		mod_delayed_work(dev_priv->wq,
> -				 &dev_priv->gt.idle_work,
> -				 msecs_to_jiffies(50));
> -		goto out_rearm;
> -	}
> -
> -	/*
> -	 * New request retired after this work handler started, extend active
> -	 * period until next instance of the work.
> -	 */
> -	if (new_requests_since_last_retire(dev_priv))
> -		goto out_unlock;
> -
> -	/*
> -	 * Be paranoid and flush a concurrent interrupt to make sure
> -	 * we don't reactivate any irq tasklets after parking.
> -	 *
> -	 * FIXME: Note that even though we have waited for execlists to be idle,
> -	 * there may still be an in-flight interrupt even though the CSB
> -	 * is now empty. synchronize_irq() makes sure that a residual interrupt
> -	 * is completed before we continue, but it doesn't prevent the HW from
> -	 * raising a spurious interrupt later. To complete the shield we should
> -	 * coordinate disabling the CS irq with flushing the interrupts.
> -	 */
> -	synchronize_irq(dev_priv->drm.irq);
> -
> -	intel_engines_park(dev_priv);
> -	i915_gem_timelines_park(dev_priv);
> -
> -	i915_pmu_gt_parked(dev_priv);
> -
> -	GEM_BUG_ON(!dev_priv->gt.awake);
> -	dev_priv->gt.awake = false;
> -	rearm_hangcheck = false;
> -
> -	if (INTEL_GEN(dev_priv) >= 6)
> -		gen6_rps_idle(dev_priv);
> -
> -	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
> -
> -	intel_runtime_pm_put(dev_priv);
> -out_unlock:
> -	mutex_unlock(&dev_priv->drm.struct_mutex);
> -
> -out_rearm:
> -	if (rearm_hangcheck) {
> -		GEM_BUG_ON(!dev_priv->gt.awake);
> -		i915_queue_hangcheck(dev_priv);
> -	}
> -}
> -
>   void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
>   {
>   	struct drm_i915_private *i915 = to_i915(gem->dev);
> @@ -4798,6 +4684,120 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
>   	}
>   }
>   
> +static void
> +i915_gem_retire_work_handler(struct work_struct *work)
> +{
> +	struct drm_i915_private *dev_priv =
> +		container_of(work, typeof(*dev_priv), gt.retire_work.work);
> +	struct drm_device *dev = &dev_priv->drm;
> +
> +	/* Come back later if the device is busy... */
> +	if (mutex_trylock(&dev->struct_mutex)) {
> +		i915_gem_retire_requests(dev_priv);
> +		mutex_unlock(&dev->struct_mutex);
> +	}
> +
> +	/* Keep the retire handler running until we are finally idle.
> +	 * We do not need to do this test under locking as in the worst-case
> +	 * we queue the retire worker once too often.
> +	 */
> +	if (READ_ONCE(dev_priv->gt.awake)) {
> +		i915_queue_hangcheck(dev_priv);
> +		queue_delayed_work(dev_priv->wq,
> +				   &dev_priv->gt.retire_work,
> +				   round_jiffies_up_relative(HZ));
> +	}
> +}
> +
> +static inline bool
> +new_requests_since_last_retire(const struct drm_i915_private *i915)
> +{
> +	return (READ_ONCE(i915->gt.active_requests) ||
> +		work_pending(&i915->gt.idle_work.work));
> +}
> +
> +static void
> +i915_gem_idle_work_handler(struct work_struct *work)
> +{
> +	struct drm_i915_private *dev_priv =
> +		container_of(work, typeof(*dev_priv), gt.idle_work.work);
> +	bool rearm_hangcheck;
> +	ktime_t end;
> +
> +	if (!READ_ONCE(dev_priv->gt.awake))
> +		return;
> +
> +	/*
> +	 * Wait for the last execlists context to complete, but bail out in
> +	 * case a new request is submitted.
> +	 */
> +	end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
> +	do {
> +		if (new_requests_since_last_retire(dev_priv))
> +			return;
> +
> +		if (intel_engines_are_idle(dev_priv))
> +			break;
> +
> +		usleep_range(100, 500);
> +	} while (ktime_before(ktime_get(), end));
> +
> +	rearm_hangcheck =
> +		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
> +
> +	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
> +		/* Currently busy, come back later */
> +		mod_delayed_work(dev_priv->wq,
> +				 &dev_priv->gt.idle_work,
> +				 msecs_to_jiffies(50));
> +		goto out_rearm;
> +	}
> +
> +	/*
> +	 * New request retired after this work handler started, extend active
> +	 * period until next instance of the work.
> +	 */
> +	if (new_requests_since_last_retire(dev_priv))
> +		goto out_unlock;
> +
> +	/*
> +	 * Be paranoid and flush a concurrent interrupt to make sure
> +	 * we don't reactivate any irq tasklets after parking.
> +	 *
> +	 * FIXME: Note that even though we have waited for execlists to be idle,
> +	 * there may still be an in-flight interrupt even though the CSB
> +	 * is now empty. synchronize_irq() makes sure that a residual interrupt
> +	 * is completed before we continue, but it doesn't prevent the HW from
> +	 * raising a spurious interrupt later. To complete the shield we should
> +	 * coordinate disabling the CS irq with flushing the interrupts.
> +	 */
> +	synchronize_irq(dev_priv->drm.irq);
> +
> +	intel_engines_park(dev_priv);
> +	i915_gem_timelines_park(dev_priv);
> +
> +	i915_pmu_gt_parked(dev_priv);
> +
> +	GEM_BUG_ON(!dev_priv->gt.awake);
> +	dev_priv->gt.awake = false;
> +	rearm_hangcheck = false;
> +
> +	if (INTEL_GEN(dev_priv) >= 6)
> +		gen6_rps_idle(dev_priv);
> +
> +	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
> +
> +	intel_runtime_pm_put(dev_priv);
> +out_unlock:
> +	mutex_unlock(&dev_priv->drm.struct_mutex);
> +
> +out_rearm:
> +	if (rearm_hangcheck) {
> +		GEM_BUG_ON(!dev_priv->gt.awake);
> +		i915_queue_hangcheck(dev_priv);
> +	}
> +}
> +
>   int i915_gem_suspend(struct drm_i915_private *dev_priv)
>   {
>   	struct drm_device *dev = &dev_priv->drm;
> 
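This is pure code motion (the diffstat is symmetric, 114 lines out and
114 lines back in), so there is nothing new to review in the bodies;
the point is only that a later patch can call
i915_gem_retire_work_handler() from below its new position without a
forward declaration. For anyone reading along, here is a tiny
standalone C sketch (hypothetical foo/bar names, nothing to do with
i915) of what is being avoided:

static void foo(void);	/* forward declaration forced by ordering */

static void bar(void)
{
	foo();		/* foo() is used before its definition */
}

static void foo(void)
{
	/* actual body */
}

Defining foo() above its first caller lets the declaration be dropped
entirely, which is exactly what the code motion buys at file scope.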

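The retire handler itself is a textbook self-rearming delayed work
item: trylock so that a busy device skips a pass instead of stalling
the workqueue, then requeue while the device is awake. A rough
kernel-style sketch of the same pattern, with made-up toy_* names
standing in for the real i915 structures:

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/timer.h>	/* round_jiffies_up_relative() */
#include <linux/workqueue.h>

struct toy_device {
	struct mutex lock;
	bool awake;
	struct delayed_work retire_work;
};

static void toy_retire_handler(struct work_struct *work)
{
	struct toy_device *toy =
		container_of(work, struct toy_device, retire_work.work);

	/* Come back later if the device is busy... */
	if (mutex_trylock(&toy->lock)) {
		/* ... retire completed requests here ... */
		mutex_unlock(&toy->lock);
	}

	/* Unlocked read is fine: the worst case is queuing the
	 * worker once too often, as the patch's own comment notes. */
	if (READ_ONCE(toy->awake))
		schedule_delayed_work(&toy->retire_work,
				      round_jiffies_up_relative(HZ));
}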
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
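
P.S. The bounded poll in i915_gem_idle_work_handler() is a reusable
shape too: wait for idleness against a ktime deadline, but bail out
the moment new work shows up. A hedged sketch, again with hypothetical
toy_* helpers rather than the real intel_engines_are_idle() and
new_requests_since_last_retire():

#include <linux/delay.h>	/* usleep_range() */
#include <linux/ktime.h>

static bool toy_new_requests(void);	/* hypothetical stand-ins */
static bool toy_engines_idle(void);

static bool toy_wait_for_idle(unsigned int timeout_ms)
{
	ktime_t end = ktime_add_ms(ktime_get(), timeout_ms);

	do {
		if (toy_new_requests())
			return false;	/* fresh work arrived: give up */

		if (toy_engines_idle())
			return true;	/* goal reached */

		usleep_range(100, 500);	/* sleep between samples */
	} while (ktime_before(ktime_get(), end));

	return false;			/* deadline expired */
}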

Regards,

Tvrtko