[Intel-gfx] [PATCH 6/9] drm/i915/pmu: Wire up engine busy stats to PMU

Tvrtko Ursulin tursulin at ursulin.net
Fri Oct 20 09:24:37 UTC 2017


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

We can use engine busy stats instead of the sampling timer for
better accuracy.

By doing this we replace the stochastic sampling with a busyness
metric derived directly from engine activity. This is driven by
context switch interrupts, so it is as accurate as software
tracking can get.

As a secondary benefit, we can avoid running the sampling timer
altogether when only the busyness metric is enabled.
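
Not part of the patch itself, but as a sketch of how this counter ends
up being consumed from userspace: the PMU is registered as "i915", so
its type id shows up under the usual perf event_source sysfs location
and an engine busy event can be opened with perf_event_open(). The
BUSY_CONFIG value below is a placeholder for whichever config encoding
the rest of the series exports for a given engine, so treat the exact
event selection as an assumption, not part of this patch.

#define _GNU_SOURCE
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#define BUSY_CONFIG 0 /* placeholder: engine busy event config value */

int main(void)
{
	struct perf_event_attr attr;
	uint64_t busy[2];
	int type, fd;
	FILE *f;

	/* PMU type id assigned by perf_pmu_register(..., "i915", -1). */
	f = fopen("/sys/bus/event_source/devices/i915/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = BUSY_CONFIG;

	/* Uncore-style PMU: open system-wide on CPU 0, no task context. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	read(fd, &busy[0], sizeof(busy[0]));
	sleep(1);
	read(fd, &busy[1], sizeof(busy[1]));

	/* With busy stats the counter is engine busy time in nanoseconds. */
	printf("busy: %" PRIu64 " ns over ~1s\n", busy[1] - busy[0]);

	close(fd);
	return 0;
}

Tools like perf stat would ultimately read the same counter once the
event attributes are exported.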

v2: Rebase.
v3:
 * Rebase, comments.
 * Leave engine busyness controls out of workers.
v4: Checkpatch cleanup.
v5: Added comment to pmu_needs_timer change.
v6:
 * Rebase.
 * Fix style of some comments. (Chris Wilson)
v7: Rebase and commit message update. (Chris Wilson)
v8: Add delayed stats disabling to improve accuracy in the face of
    CPU hotplug events.
v9: Rebase.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk> (v7)
---
 drivers/gpu/drm/i915/i915_pmu.c         | 78 ++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_ringbuffer.h | 14 ++++++
 2 files changed, 90 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index b36f5e1e0c1d..e08d2bd0c3d8 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -90,6 +90,11 @@ static unsigned int event_enabled_bit(struct perf_event *event)
 	return config_enabled_bit(event->attr.config);
 }
 
+static bool supports_busy_stats(void)
+{
+	return i915_modparams.enable_execlists;
+}
+
 static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 {
 	u64 enable;
@@ -115,6 +120,12 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
 	 */
 	if (!gpu_active)
 		enable &= ~ENGINE_SAMPLE_MASK;
+	/*
+	 * Also, when software busyness tracking is available, we do not
+	 * need the timer for the I915_SAMPLE_BUSY counter.
+	 */
+	else if (supports_busy_stats())
+		enable &= ~BIT(I915_SAMPLE_BUSY);
 
 	/*
 	 * If some bits remain it means we need the sampling timer running.
@@ -363,6 +374,9 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 
 		if (WARN_ON_ONCE(!engine)) {
 			/* Do nothing */
+		} else if (sample == I915_SAMPLE_BUSY &&
+			   engine->pmu.busy_stats) {
+			val = ktime_to_ns(intel_engine_get_busy_time(engine));
 		} else {
 			val = engine->pmu.sample[sample].cur;
 		}
@@ -399,6 +413,12 @@ static void i915_pmu_event_read(struct perf_event *event)
 	local64_add(new - prev, &event->count);
 }
 
+static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
+{
+	return supports_busy_stats() &&
+	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
+}
+
 static void i915_pmu_enable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -438,7 +458,21 @@ static void i915_pmu_enable(struct perf_event *event)
 
 		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
-		engine->pmu.enable_count[sample]++;
+		if (engine->pmu.enable_count[sample]++ == 0) {
+			/*
+			 * Enable engine busy stats tracking if needed or
+			 * alternatively cancel the scheduled disable.
+			 *
+			 * If the delayed disable was pending, cancel it and
+			 * skip enabling, since stats are already enabled.
+			 */
+			if (engine_needs_busy_stats(engine) &&
+			    !engine->pmu.busy_stats) {
+				engine->pmu.busy_stats = true;
+				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
+					intel_enable_engine_stats(engine);
+			}
+		}
 	}
 
 	/*
@@ -451,6 +485,14 @@ static void i915_pmu_enable(struct perf_event *event)
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }
 
+static void __disable_busy_stats(struct work_struct *work)
+{
+	struct intel_engine_cs *engine =
+	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
+
+	intel_disable_engine_stats(engine);
+}
+
 static void i915_pmu_disable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -474,8 +516,26 @@ static void i915_pmu_disable(struct perf_event *event)
 		 * Decrement the reference count and clear the enabled
 		 * bitmask when the last listener on an event goes away.
 		 */
-		if (--engine->pmu.enable_count[sample] == 0)
+		if (--engine->pmu.enable_count[sample] == 0) {
 			engine->pmu.enable &= ~BIT(sample);
+			if (!engine_needs_busy_stats(engine) &&
+			    engine->pmu.busy_stats) {
+				engine->pmu.busy_stats = false;
+				/*
+				 * Defer the actual disable so that the rapid
+				 * on/off cycles on events, which can happen
+				 * when tools like perf stat start up, are
+				 * handled more gracefully.
+				 *
+				 * It also helps busy stats accuracy in the
+				 * face of background CPU offline/online
+				 * migration events.
+				 */
+				queue_delayed_work(system_wq,
+						   &engine->pmu.disable_busy_stats,
+						   round_jiffies_up_relative(HZ));
+			}
+		}
 	}
 
 	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -716,6 +776,8 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
 
 void i915_pmu_register(struct drm_i915_private *i915)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	int ret;
 
 	if (INTEL_GEN(i915) <= 2) {
@@ -737,6 +799,10 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	i915->pmu.timer.function = i915_sample;
 
+	for_each_engine(engine, i915, id)
+		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
+				  __disable_busy_stats);
+
 	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
 	if (ret)
 		goto err;
@@ -756,6 +822,9 @@ void i915_pmu_register(struct drm_i915_private *i915)
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
 	if (!i915->pmu.base.event_init)
 		return;
 
@@ -763,6 +832,11 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
 	hrtimer_cancel(&i915->pmu.timer);
 
+	for_each_engine(engine, i915, id) {
+		GEM_BUG_ON(engine->pmu.busy_stats);
+		flush_delayed_work(&engine->pmu.disable_busy_stats);
+	}
+
 	i915_pmu_unregister_cpuhp_state(i915);
 
 	perf_pmu_unregister(&i915->pmu.base);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8ac28ee0ff4e..2d1c256fc3b5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -358,6 +358,20 @@ struct intel_engine_cs {
 		 * Our internal timer stores the current counters in this field.
 		 */
 		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+		/**
+		 * @busy_stats: Whether enabling of engine stats tracking
+		 * has been requested.
+		 */
+		bool busy_stats;
+		/**
+		 * @disable_busy_stats: Work item for busy stats disabling.
+		 *
+		 * Same as with @enable_busy_stats action, with the difference
+		 * that we delay it in case there are rapid enable-disable
+		 * actions, which can happen during tool startup (like perf
+		 * stat).
+		 */
+		struct delayed_work disable_busy_stats;
 	} pmu;
 
 	/*
-- 
2.9.5


