[Intel-gfx] [PATCH 01/62] drm/i915: Only start retire worker when idle

Joonas Lahtinen joonas.lahtinen at linux.intel.com
Tue Jun 7 11:31:07 UTC 2016


On pe, 2016-06-03 at 17:36 +0100, Chris Wilson wrote:
> The retire worker is a low frequency task that makes sure we retire
> outstanding requests if userspace is being lax. We only need to start it
> once as it remains active until the GPU is idle, so do a cheap test
> before the more expensive queue_work(). A consequence of this is that we
> need correct locking in the worker to make the hot path of request
> submission cheap. To keep the symmetry and keep hangcheck strictly bound
> by the GPU's wakelock, we move the cancel_sync(hangcheck) to the idle
> worker before dropping the wakelock.
> 
> v2: Guard against RCU fouling the breadcrumbs bottom-half whilst we kick
> the waiter.
> v3: Remove the wakeref assertion squelching (now we hold a wakeref for
> the hangcheck, any rpm error there is genuine).
> v4: To prevent excess work when retiring requests, we split the busy
> flag into two, a boolean to denote whether we hold the wakeref and a
> bitmask of active engines.
> 
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> References: https://bugs.freedesktop.org/show_bug.cgi?id=88437
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c        |   5 +-
>  drivers/gpu/drm/i915/i915_drv.c            |   2 -
>  drivers/gpu/drm/i915/i915_drv.h            |  56 +++++++-------
>  drivers/gpu/drm/i915/i915_gem.c            | 114 ++++++++++++++++++-----------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   6 ++
>  drivers/gpu/drm/i915/i915_irq.c            |  15 +---
>  drivers/gpu/drm/i915/intel_display.c       |  26 -------
>  drivers/gpu/drm/i915/intel_pm.c            |   2 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |   4 +-
>  9 files changed, 115 insertions(+), 115 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 72dae6fb0aa2..dd6cf222e8f5 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2437,7 +2437,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
>  	struct drm_file *file;
>  
>  	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
> -	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
> +	seq_printf(m, "GPU busy? %s [%x]\n",
> +		   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
>  	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
>  	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
>  		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
> @@ -2777,7 +2778,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
>  	if (!HAS_RUNTIME_PM(dev_priv))
>  		seq_puts(m, "Runtime power management not supported\n");
>  
> -	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
> +	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
>  	seq_printf(m, "IRQs disabled: %s\n",
>  		   yesno(!intel_irqs_enabled(dev_priv)));
>  #ifdef CONFIG_PM
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index 3c8c75c77574..5f7208d2fdbf 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -2697,8 +2697,6 @@ static int intel_runtime_suspend(struct device *device)
>  	i915_gem_release_all_mmaps(dev_priv);
>  	mutex_unlock(&dev->struct_mutex);
>  
> -	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
> -
>  	intel_guc_suspend(dev);
>  
>  	intel_suspend_gt_powersave(dev_priv);
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 88d9242398ce..3f075adf9e84 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1305,37 +1305,11 @@ struct i915_gem_mm {
>  	struct list_head fence_list;
>  
>  	/**
> -	 * We leave the user IRQ off as much as possible,
> -	 * but this means that requests will finish and never
> -	 * be retired once the system goes idle. Set a timer to
> -	 * fire periodically while the ring is running. When it
> -	 * fires, go retire requests.
> -	 */
> -	struct delayed_work retire_work;
> -
> -	/**
> -	 * When we detect an idle GPU, we want to turn on
> -	 * powersaving features. So once we see that there
> -	 * are no more requests outstanding and no more
> -	 * arrive within a small period of time, we fire
> -	 * off the idle_work.
> -	 */
> -	struct delayed_work idle_work;
> -
> -	/**
>  	 * Are we in a non-interruptible section of code like
>  	 * modesetting?
>  	 */
>  	bool interruptible;
>  
> -	/**
> -	 * Is the GPU currently considered idle, or busy executing userspace
> -	 * requests?  Whilst idle, we attempt to power down the hardware and
> -	 * display clocks. In order to reduce the effect on performance, there
> -	 * is a slight delay before we do so.
> -	 */
> -	bool busy;
> -
>  	/* the indicator for dispatch video commands on two BSD rings */
>  	unsigned int bsd_ring_dispatch_index;
>  
> @@ -2034,6 +2008,34 @@ struct drm_i915_private {
>  		int (*init_engines)(struct drm_device *dev);
>  		void (*cleanup_engine)(struct intel_engine_cs *engine);
>  		void (*stop_engine)(struct intel_engine_cs *engine);
> +
> +		/**
> +		 * Is the GPU currently considered idle, or busy executing
> +		 * userspace requests? Whilst idle, we allow runtime power
> +		 * management to power down the hardware and display clocks.
> +		 * In order to reduce the effect on performance, there
> +		 * is a slight delay before we do so.
> +		 */
> +		unsigned active_engines;
> +		bool awake;
> +
> +		/**
> +		 * We leave the user IRQ off as much as possible,
> +		 * but this means that requests will finish and never
> +		 * be retired once the system goes idle. Set a timer to
> +		 * fire periodically while the ring is running. When it
> +		 * fires, go retire requests.
> +		 */
> +		struct delayed_work retire_work;
> +
> +		/**
> +		 * When we detect an idle GPU, we want to turn on
> +		 * powersaving features. So once we see that there
> +		 * are no more requests outstanding and no more
> +		 * arrive within a small period of time, we fire
> +		 * off the idle_work.
> +		 */
> +		struct delayed_work idle_work;

Code motion would ideally go in separate patches, but, well, it's already
a 62-patch series.

>  	} gt;
>  
>  	/* perform PHY state sanity checks? */
> @@ -3247,7 +3249,7 @@ int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
>  struct drm_i915_gem_request *
>  i915_gem_find_active_request(struct intel_engine_cs *engine);
>  
> -bool i915_gem_retire_requests(struct drm_i915_private *dev_priv);
> +void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
>  void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
>  
>  static inline u32 i915_reset_counter(struct i915_gpu_error *error)
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index f4e550ddaa5d..5a7131b749a2 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2554,6 +2554,26 @@ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
>  	return 0;
>  }
>  
> +static void i915_gem_mark_busy(struct drm_i915_private *dev_priv,
> +			       const struct intel_engine_cs *engine)
> +{
> +	dev_priv->gt.active_engines |= intel_engine_flag(engine);
> +	if (dev_priv->gt.awake)
> +		return;
> +
> +	intel_runtime_pm_get_noresume(dev_priv);
> +	dev_priv->gt.awake = true;
> +
> +	intel_enable_gt_powersave(dev_priv);
> +	i915_update_gfx_val(dev_priv);
> +	if (INTEL_INFO(dev_priv)->gen >= 6)
> +		gen6_rps_busy(dev_priv);
> +
> +	queue_delayed_work(dev_priv->wq,
> +			   &dev_priv->gt.retire_work,
> +			   round_jiffies_up_relative(HZ));
> +}
> +
>  /*
>   * NB: This function is not allowed to fail. Doing so would mean the the
>   * request is not being tracked for completion but the work itself is
> @@ -2640,12 +2660,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	}
>  	/* Not allowed to fail! */
>  	WARN(ret, "emit|add_request failed: %d!\n", ret);
> -
> -	queue_delayed_work(dev_priv->wq,
> -			   &dev_priv->mm.retire_work,
> -			   round_jiffies_up_relative(HZ));
> -	intel_mark_busy(dev_priv);
> -
>  	/* Sanity check that the reserved size was large enough. */
>  	ret = intel_ring_get_tail(ringbuf) - request_start;
>  	if (ret < 0)
> @@ -2654,6 +2668,8 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  		  "Not enough space reserved (%d bytes) "
>  		  "for adding the request (%d bytes)\n",
>  		  reserved_tail, ret);
> +
> +	i915_gem_mark_busy(dev_priv, engine);
>  }
>  
>  static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
> @@ -2968,46 +2984,47 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
>  	WARN_ON(i915_verify_lists(engine->dev));
>  }
>  
> -bool
> -i915_gem_retire_requests(struct drm_i915_private *dev_priv)
> +void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
>  {
>  	struct intel_engine_cs *engine;
> -	bool idle = true;
> +
> +	if (dev_priv->gt.active_engines == 0)
> +		return;
> +
> +	GEM_BUG_ON(!dev_priv->gt.awake);
>  
>  	for_each_engine(engine, dev_priv) {
>  		i915_gem_retire_requests_ring(engine);
> -		idle &= list_empty(&engine->request_list);
> -		if (i915.enable_execlists) {
> -			spin_lock_bh(&engine->execlist_lock);
> -			idle &= list_empty(&engine->execlist_queue);
> -			spin_unlock_bh(&engine->execlist_lock);
> -		}

As discussed on IRC, the disappearance of this could be mentioned in the
commit message.

> +		if (list_empty(&engine->request_list))
> +			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
>  	}
>  
> -	if (idle)
> +	if (dev_priv->gt.active_engines == 0)
>  		mod_delayed_work(dev_priv->wq,
> -				 &dev_priv->mm.idle_work,
> +				 &dev_priv->gt.idle_work,
>  				 msecs_to_jiffies(100));
> -
> -	return idle;
>  }
>  
>  static void
>  i915_gem_retire_work_handler(struct work_struct *work)
>  {
>  	struct drm_i915_private *dev_priv =
> -		container_of(work, typeof(*dev_priv), mm.retire_work.work);
> +		container_of(work, typeof(*dev_priv), gt.retire_work.work);
>  	struct drm_device *dev = dev_priv->dev;
> -	bool idle;
>  
>  	/* Come back later if the device is busy... */
> -	idle = false;
>  	if (mutex_trylock(&dev->struct_mutex)) {
> -		idle = i915_gem_retire_requests(dev_priv);
> +		i915_gem_retire_requests(dev_priv);
>  		mutex_unlock(&dev->struct_mutex);
>  	}
> -	if (!idle)
> -		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
> +
> +	/* Keep the retire handler running until we are finally idle.
> +	 * We do not need to do this test under locking as in the worst-case
> +	 * we queue the retire worker once too often.
> +	 */
> +	if (READ_ONCE(dev_priv->gt.awake))

This is the only occurrence in this function, so I don't think we need
READ_ONCE. Not sure READ_ONCE is good documentation of "read outside the
lock"; a comment might be better.

> +		queue_delayed_work(dev_priv->wq,
> +				   &dev_priv->gt.retire_work,
>  				   round_jiffies_up_relative(HZ));
>  }
>  
> @@ -3015,25 +3032,36 @@ static void
>  i915_gem_idle_work_handler(struct work_struct *work)
>  {
>  	struct drm_i915_private *dev_priv =
> -		container_of(work, typeof(*dev_priv), mm.idle_work.work);
> +		container_of(work, typeof(*dev_priv), gt.idle_work.work);
>  	struct drm_device *dev = dev_priv->dev;
>  	struct intel_engine_cs *engine;
>  
> -	for_each_engine(engine, dev_priv)
> -		if (!list_empty(&engine->request_list))
> -			return;
> +	if (!READ_ONCE(dev_priv->gt.awake))
> +		return;
>  
> -	/* we probably should sync with hangcheck here, using cancel_work_sync.
> -	 * Also locking seems to be fubar here, engine->request_list is protected
> -	 * by dev->struct_mutex. */
> +	mutex_lock(&dev->struct_mutex);
> +	if (dev_priv->gt.active_engines)
> +		goto out;
>  
> -	intel_mark_idle(dev_priv);
> +	for_each_engine(engine, dev_priv)
> +		i915_gem_batch_pool_fini(&engine->batch_pool);
>  
> -	if (mutex_trylock(&dev->struct_mutex)) {
> -		for_each_engine(engine, dev_priv)
> -			i915_gem_batch_pool_fini(&engine->batch_pool);
> +	GEM_BUG_ON(!dev_priv->gt.awake);
> +	dev_priv->gt.awake = false;
>  
> -		mutex_unlock(&dev->struct_mutex);
> +	if (INTEL_INFO(dev_priv)->gen >= 6)
> +		gen6_rps_idle(dev_priv);
> +	intel_runtime_pm_put(dev_priv);
> +out:
> +	mutex_unlock(&dev->struct_mutex);
> +
> +	if (!dev_priv->gt.awake &&

No READ_ONCE here, even though we just unlocked the mutex, so this lacks
some consistency.

Also, this assumes we might be pre-empted between unlocking the mutex and
making this test, so I'm a little bit confused. Do you want to optimize
by avoiding the call to cancel_delayed_work_sync?

> +	    cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work)) {
> +		unsigned stuck = intel_kick_waiters(dev_priv);
> +		if (unlikely(stuck)) {
> +			DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
> +			dev_priv->gpu_error.missed_irq_rings |= stuck;
> +		}
>  	}
>  }
>  
> @@ -4154,7 +4182,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
>  
>  	ret = __i915_wait_request(target, true, NULL, NULL);
>  	if (ret == 0)
> -		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
> +		queue_delayed_work(dev_priv->wq, &dev_priv->gt.retire_work, 0);
>  
>  	i915_gem_request_unreference(target);
>  
> @@ -4672,13 +4700,13 @@ i915_gem_suspend(struct drm_device *dev)
>  	mutex_unlock(&dev->struct_mutex);
>  
>  	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
> -	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
> -	flush_delayed_work(&dev_priv->mm.idle_work);
> +	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
> +	flush_delayed_work(&dev_priv->gt.idle_work);
>  
>  	/* Assert that we sucessfully flushed all the work and
>  	 * reset the GPU back to its idle, low power state.
>  	 */
> -	WARN_ON(dev_priv->mm.busy);
> +	WARN_ON(dev_priv->gt.awake);
>  
>  	return 0;
>  
> @@ -4982,9 +5010,9 @@ i915_gem_load_init(struct drm_device *dev)
>  		init_engine_lists(&dev_priv->engine[i]);
>  	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
>  		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
> -	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
> +	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
>  			  i915_gem_retire_work_handler);
> -	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
> +	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
>  			  i915_gem_idle_work_handler);
>  	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
>  	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 8097698b9622..d3297dab0298 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1477,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  		dispatch_flags |= I915_DISPATCH_RS;
>  	}
>  
> +	/* Take a local wakeref for preparing to dispatch the execbuf as
> +	 * we expect to access the hardware fairly frequently in the
> +	 * process. Upon first dispatch, we acquire another prolonged
> +	 * wakeref that we hold until the GPU has been idle for at least
> +	 * 100ms.
> +	 */
>  	intel_runtime_pm_get(dev_priv);
>  
>  	ret = i915_mutex_lock_interruptible(dev);
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index f74f5727ea77..7a2dc8f1f64e 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -3102,12 +3102,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
>  	if (!i915.enable_hangcheck)
>  		return;
>  
> -	/*
> -	 * The hangcheck work is synced during runtime suspend, we don't
> -	 * require a wakeref. TODO: instead of disabling the asserts make
> -	 * sure that we hold a reference when this work is running.
> -	 */
> -	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
> +	if (!READ_ONCE(dev_priv->gt.awake))
> +		return;
>  
>  	/* As enabling the GPU requires fairly extensive mmio access,
>  	 * periodically arm the mmio checker to see if we are triggering
> @@ -3215,17 +3211,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
>  		}
>  	}
>  
> -	if (rings_hung) {
> +	if (rings_hung)
>  		i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
> -		goto out;
> -	}
>  
>  	/* Reset timer in case GPU hangs without another request being added */
>  	if (busy_count)
>  		i915_queue_hangcheck(dev_priv);
> -
> -out:
> -	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
>  }
>  
>  static void ibx_irq_reset(struct drm_device *dev)
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index bb09ee6d1a3f..14e41fdd8112 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -10969,32 +10969,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
>  	return mode;
>  }
>  
> -void intel_mark_busy(struct drm_i915_private *dev_priv)
> -{
> -	if (dev_priv->mm.busy)
> -		return;
> -
> -	intel_runtime_pm_get(dev_priv);
> -	intel_enable_gt_powersave(dev_priv);
> -	i915_update_gfx_val(dev_priv);
> -	if (INTEL_GEN(dev_priv) >= 6)
> -		gen6_rps_busy(dev_priv);
> -	dev_priv->mm.busy = true;
> -}
> -
> -void intel_mark_idle(struct drm_i915_private *dev_priv)
> -{
> -	if (!dev_priv->mm.busy)
> -		return;
> -
> -	dev_priv->mm.busy = false;
> -
> -	if (INTEL_GEN(dev_priv) >= 6)
> -		gen6_rps_idle(dev_priv);
> -
> -	intel_runtime_pm_put(dev_priv);
> -}
> -
>  static void intel_crtc_destroy(struct drm_crtc *crtc)
>  {
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 712bd0debb91..35bb9a23cd2d 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -4850,7 +4850,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
>  	/* This is intentionally racy! We peek at the state here, then
>  	 * validate inside the RPS worker.
>  	 */
> -	if (!(dev_priv->mm.busy &&
> +	if (!(dev_priv->gt.awake &&
>  	      dev_priv->rps.enabled &&
>  	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
>  		return;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 166f1a3829b0..d0cd9a1aa80e 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -372,13 +372,13 @@ struct intel_engine_cs {
>  };
>  
>  static inline bool
> -intel_engine_initialized(struct intel_engine_cs *engine)
> +intel_engine_initialized(const struct intel_engine_cs *engine)
>  {
>  	return engine->i915 != NULL;
>  }
>  
>  static inline unsigned
> -intel_engine_flag(struct intel_engine_cs *engine)
> +intel_engine_flag(const struct intel_engine_cs *engine)
>  {
>  	return 1 << engine->id;
>  }

I think the majority of our functions are not const-correct; I remember
some grunting on the subject when I tried to change some of them to be.
But I'm all for it myself.

Regards, Joonas

-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation


More information about the Intel-gfx mailing list