[Intel-gfx] [PATCH] drm/i915/pmu: Use GT parked for estimating RC6 while asleep

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Thu Sep 12 10:19:42 UTC 2019


On 11/09/2019 17:38, Chris Wilson wrote:
> As we track when we put the GT device to sleep upon idling, we can use
> that callback to sample the current rc6 counters and record the
> timestamp for estimating samples after that point while asleep.
> 
> v2: Stick to using ktime_t
> v3: Track user_wakerefs that interfere with the new
> intel_gt_pm_wait_for_idle
> 
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105010
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_pm.c   |  19 ++++
>   drivers/gpu/drm/i915/gt/intel_gt_types.h |   1 +
>   drivers/gpu/drm/i915/i915_debugfs.c      |  22 ++---
>   drivers/gpu/drm/i915/i915_pmu.c          | 120 +++++++++++------------
>   drivers/gpu/drm/i915/i915_pmu.h          |   4 +-
>   5 files changed, 90 insertions(+), 76 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> index 3bd764104d41..45a72cb698db 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> @@ -141,6 +141,21 @@ bool i915_gem_load_power_context(struct drm_i915_private *i915)
>   	return switch_to_kernel_context_sync(&i915->gt);
>   }
>   
> +static void user_forcewake(struct intel_gt *gt, bool suspend)
> +{
> +	int count = atomic_read(&gt->user_wakeref);
> +
> +	if (likely(!count))
> +		return;
> +
> +	intel_gt_pm_get(gt);
> +	if (suspend)
> +		atomic_sub(count, &gt->wakeref.count);
> +	else
> +		atomic_add(count, &gt->wakeref.count);
> +	intel_gt_pm_put(gt);
> +}
> +
>   void i915_gem_suspend(struct drm_i915_private *i915)
>   {
>   	GEM_TRACE("\n");
> @@ -148,6 +163,8 @@ void i915_gem_suspend(struct drm_i915_private *i915)
>   	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
>   	flush_workqueue(i915->wq);
>   
> +	user_forcewake(&i915->gt, true);
> +
>   	mutex_lock(&i915->drm.struct_mutex);
>   
>   	/*
> @@ -259,6 +276,8 @@ void i915_gem_resume(struct drm_i915_private *i915)
>   	if (!i915_gem_load_power_context(i915))
>   		goto err_wedged;
>   
> +	user_forcewake(&i915->gt, false);
> +
>   out_unlock:
>   	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
>   	mutex_unlock(&i915->drm.struct_mutex);
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> index dc295c196d11..3039cef64b11 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> @@ -50,6 +50,7 @@ struct intel_gt {
>   	} timelines;
>   
>   	struct intel_wakeref wakeref;
> +	atomic_t user_wakeref;
>   
>   	struct list_head closed_vma;
>   	spinlock_t closed_lock; /* guards the list of closed_vma */
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 708855e051b5..f00c0dc4f57e 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -3988,13 +3988,12 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
>   static int i915_forcewake_open(struct inode *inode, struct file *file)
>   {
>   	struct drm_i915_private *i915 = inode->i_private;
> +	struct intel_gt *gt = &i915->gt;
>   
> -	if (INTEL_GEN(i915) < 6)
> -		return 0;
> -
> -	file->private_data =
> -		(void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
> -	intel_uncore_forcewake_user_get(&i915->uncore);
> +	atomic_inc(&gt->user_wakeref);
> +	intel_gt_pm_get(gt);
> +	if (INTEL_GEN(i915) >= 6)
> +		intel_uncore_forcewake_user_get(gt->uncore);
>   
>   	return 0;
>   }
> @@ -4002,13 +4001,12 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
>   static int i915_forcewake_release(struct inode *inode, struct file *file)
>   {
>   	struct drm_i915_private *i915 = inode->i_private;
> +	struct intel_gt *gt = &i915->gt;
>   
> -	if (INTEL_GEN(i915) < 6)
> -		return 0;
> -
> -	intel_uncore_forcewake_user_put(&i915->uncore);
> -	intel_runtime_pm_put(&i915->runtime_pm,
> -			     (intel_wakeref_t)(uintptr_t)file->private_data);
> +	if (INTEL_GEN(i915) >= 6)
> +		intel_uncore_forcewake_user_put(&i915->uncore);
> +	intel_gt_pm_put(gt);
> +	atomic_dec(&gt->user_wakeref);
>   
>   	return 0;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
> index 8e251e719390..a3d3d3e39c15 100644
> --- a/drivers/gpu/drm/i915/i915_pmu.c
> +++ b/drivers/gpu/drm/i915/i915_pmu.c
> @@ -116,19 +116,51 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
>   	return enable;
>   }
>   
> +static u64 __get_rc6(struct intel_gt *gt)
> +{
> +	struct drm_i915_private *i915 = gt->i915;
> +	u64 val;
> +
> +	val = intel_rc6_residency_ns(i915,
> +				     IS_VALLEYVIEW(i915) ?
> +				     VLV_GT_RENDER_RC6 :
> +				     GEN6_GT_GFX_RC6);
> +
> +	if (HAS_RC6p(i915))
> +		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
> +
> +	if (HAS_RC6pp(i915))
> +		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
> +
> +	return val;
> +}
> +
>   void i915_pmu_gt_parked(struct drm_i915_private *i915)
>   {
>   	struct i915_pmu *pmu = &i915->pmu;
> +	u64 val;
>   
>   	if (!pmu->base.event_init)
>   		return;
>   
>   	spin_lock_irq(&pmu->lock);
> +
> +	val = 0;
> +	if (pmu->sample[__I915_SAMPLE_RC6].cur)

Are you using this check, here and in unpark, effectively as a proxy for 
the event being enabled? Might be clearer to write it like that.

> +		val = __get_rc6(&i915->gt);
> +
> +	if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
> +		pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
> +		pmu->sample[__I915_SAMPLE_RC6].cur = val;
> +	}

And then this block is the same counter update as in get_rc6, so it would 
be good to consolidate them for readability. __pmu_update_rc6(pmu, val)? 
Actually, would a single block of:

if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
   __pmu_update_rc6(pmu, __get_rc6(&i915->gt));

Be all that is needed and clear?

> +	pmu->sleep_last = ktime_get();
> +
>   	/*
>   	 * Signal sampling timer to stop if only engine events are enabled and
>   	 * GPU went idle.
>   	 */
>   	pmu->timer_enabled = pmu_needs_timer(pmu, false);
> +
>   	spin_unlock_irq(&pmu->lock);
>   }
>   
> @@ -143,6 +175,11 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
>   	}
>   }
>   
> +static inline s64 ktime_since(const ktime_t kt)
> +{
> +	return ktime_to_ns(ktime_sub(ktime_get(), kt));
> +}
> +
>   void i915_pmu_gt_unparked(struct drm_i915_private *i915)
>   {
>   	struct i915_pmu *pmu = &i915->pmu;
> @@ -151,10 +188,22 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
>   		return;
>   
>   	spin_lock_irq(&pmu->lock);
> +
>   	/*
>   	 * Re-enable sampling timer when GPU goes active.
>   	 */
>   	__i915_pmu_maybe_start_timer(pmu);
> +
> +	/* Estimate how long we slept and accumulate that into rc6 counters */
> +	if (pmu->sample[__I915_SAMPLE_RC6].cur) {
> +		u64 val;
> +
> +		val = ktime_since(pmu->sleep_last);
> +		val += pmu->sample[__I915_SAMPLE_RC6].cur;
> +
> +		pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;

Same treatment for this block? How would it look..

if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
	__pmu_update_estimated_rc6(pmu);

get_rc6() could use the same "val = __pmu_update_estimated_rc6(pmu);" in 
the estimation branch.

Hope I haven't missed something.

Regards,

Tvrtko

> +	}
> +
>   	spin_unlock_irq(&pmu->lock);
>   }
>   
> @@ -426,39 +475,18 @@ static int i915_pmu_event_init(struct perf_event *event)
>   	return 0;
>   }
>   
> -static u64 __get_rc6(struct intel_gt *gt)
> -{
> -	struct drm_i915_private *i915 = gt->i915;
> -	u64 val;
> -
> -	val = intel_rc6_residency_ns(i915,
> -				     IS_VALLEYVIEW(i915) ?
> -				     VLV_GT_RENDER_RC6 :
> -				     GEN6_GT_GFX_RC6);
> -
> -	if (HAS_RC6p(i915))
> -		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
> -
> -	if (HAS_RC6pp(i915))
> -		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
> -
> -	return val;
> -}
> -
>   static u64 get_rc6(struct intel_gt *gt)
>   {
> -#if IS_ENABLED(CONFIG_PM)
>   	struct drm_i915_private *i915 = gt->i915;
> -	struct intel_runtime_pm *rpm = &i915->runtime_pm;
>   	struct i915_pmu *pmu = &i915->pmu;
> -	intel_wakeref_t wakeref;
>   	unsigned long flags;
>   	u64 val;
>   
> -	wakeref = intel_runtime_pm_get_if_in_use(rpm);
> -	if (wakeref) {
> +	spin_lock_irqsave(&pmu->lock, flags);
> +
> +	if (intel_gt_pm_get_if_awake(gt)) {
>   		val = __get_rc6(gt);
> -		intel_runtime_pm_put(rpm, wakeref);
> +		intel_gt_pm_put(gt);
>   
>   		/*
>   		 * If we are coming back from being runtime suspended we must
> @@ -466,19 +494,13 @@ static u64 get_rc6(struct intel_gt *gt)
>   		 * previously.
>   		 */
>   
> -		spin_lock_irqsave(&pmu->lock, flags);
> -
>   		if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
>   			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
>   			pmu->sample[__I915_SAMPLE_RC6].cur = val;
>   		} else {
>   			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
>   		}
> -
> -		spin_unlock_irqrestore(&pmu->lock, flags);
>   	} else {
> -		struct device *kdev = rpm->kdev;
> -
>   		/*
>   		 * We are runtime suspended.
>   		 *
> @@ -486,42 +508,16 @@ static u64 get_rc6(struct intel_gt *gt)
>   		 * on top of the last known real value, as the approximated RC6
>   		 * counter value.
>   		 */
> -		spin_lock_irqsave(&pmu->lock, flags);
>   
> -		/*
> -		 * After the above branch intel_runtime_pm_get_if_in_use failed
> -		 * to get the runtime PM reference we cannot assume we are in
> -		 * runtime suspend since we can either: a) race with coming out
> -		 * of it before we took the power.lock, or b) there are other
> -		 * states than suspended which can bring us here.
> -		 *
> -		 * We need to double-check that we are indeed currently runtime
> -		 * suspended and if not we cannot do better than report the last
> -		 * known RC6 value.
> -		 */
> -		if (pm_runtime_status_suspended(kdev)) {
> -			val = pm_runtime_suspended_time(kdev);
> +		val = ktime_since(pmu->sleep_last);
> +		val += pmu->sample[__I915_SAMPLE_RC6].cur;
>   
> -			if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
> -				pmu->suspended_time_last = val;
> -
> -			val -= pmu->suspended_time_last;
> -			val += pmu->sample[__I915_SAMPLE_RC6].cur;
> -
> -			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
> -		} else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
> -			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
> -		} else {
> -			val = pmu->sample[__I915_SAMPLE_RC6].cur;
> -		}
> -
> -		spin_unlock_irqrestore(&pmu->lock, flags);
> +		pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
>   	}
>   
> +	spin_unlock_irqrestore(&pmu->lock, flags);
> +
>   	return val;
> -#else
> -	return __get_rc6(gt);
> -#endif
>   }
>   
>   static u64 __i915_pmu_event_read(struct perf_event *event)
> diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
> index 4fc4f2478301..067dbbf3bdff 100644
> --- a/drivers/gpu/drm/i915/i915_pmu.h
> +++ b/drivers/gpu/drm/i915/i915_pmu.h
> @@ -97,9 +97,9 @@ struct i915_pmu {
>   	 */
>   	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
>   	/**
> -	 * @suspended_time_last: Cached suspend time from PM core.
> +	 * @sleep_last: Last time GT parked for RC6 estimation.
>   	 */
> -	u64 suspended_time_last;
> +	ktime_t sleep_last;
>   	/**
>   	 * @i915_attr: Memory block holding device attributes.
>   	 */
> 


More information about the Intel-gfx mailing list