[Intel-gfx] [RFC 09/14] drm/i915/pmu: Suspend sampling when GPU is idle
Tvrtko Ursulin
tursulin at ursulin.net
Tue Jul 18 14:36:13 UTC 2017
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
If only engine sampling events are enabled, we can afford to suspend
the sampling timer while the GPU is idle and so save some CPU cycles
and power.
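The gating decision is driven by which half of the enable mask has
bits set: in this RFC the engine sampling events occupy the low 32
bits and only need the timer while the GPU is busy, while events in
the upper 32 bits keep the timer running even when idle. A minimal
standalone sketch of that check (illustrative bit positions, not the
real event encoding) would look roughly like this:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool needs_timer(uint64_t enable, bool gpu_active)
{
	/* Any enabled event needs the sampling timer while the GPU is busy. */
	if (gpu_active)
		return enable != 0;

	/* When idle, only events outside the engine group (upper half) do. */
	return (enable >> 32) != 0;
}

int main(void)
{
	const uint64_t engine_only = 1ull << 0;  /* hypothetical engine sampler */
	const uint64_t other_event = 1ull << 32; /* hypothetical non-engine event */

	printf("%d %d %d\n",
	       needs_timer(engine_only, false),  /* 0 - timer can stop */
	       needs_timer(engine_only, true),   /* 1 - timer must run */
	       needs_timer(other_event, false)); /* 1 - timer must run */
	return 0;
}

With that in place, i915_pmu_gt_idle() and i915_pmu_gt_active() below
simply re-evaluate the check under pmu.lock and stop or restart the
hrtimer accordingly.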
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 6 ++++
drivers/gpu/drm/i915/i915_gem.c | 1 +
drivers/gpu/drm/i915/i915_gem_request.c | 1 +
drivers/gpu/drm/i915/i915_pmu.c | 52 +++++++++++++++++++++++++++++++--
4 files changed, 57 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index de518503e033..f1fded6dd9cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2600,7 +2600,9 @@ struct drm_i915_private {
struct {
struct pmu base;
+ spinlock_t lock;
struct hrtimer timer;
+ bool timer_enabled;
u64 enable;
u64 sample[__I915_NUM_PMU_SAMPLERS];
} pmu;
@@ -3778,9 +3780,13 @@ extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
#ifdef CONFIG_PERF_EVENTS
extern void i915_pmu_register(struct drm_i915_private *i915);
extern void i915_pmu_unregister(struct drm_i915_private *i915);
+extern void i915_pmu_gt_idle(struct drm_i915_private *i915);
+extern void i915_pmu_gt_active(struct drm_i915_private *i915);
#else
static inline void i915_pmu_register(struct drm_i915_private *i915) {}
static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_idle(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_active(struct drm_i915_private *i915) {}
#endif
/* i915_suspend.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1b2dfa8bdeef..bb81c1fcbc40 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3235,6 +3235,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
intel_engines_mark_idle(dev_priv);
i915_gem_timelines_mark_idle(dev_priv);
+ i915_pmu_gt_idle(dev_priv);
GEM_BUG_ON(!dev_priv->gt.awake);
dev_priv->gt.awake = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 483af8921060..569c44a6ba2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -876,6 +876,7 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
i915_update_gfx_val(dev_priv);
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_busy(dev_priv);
+ i915_pmu_gt_active(dev_priv);
queue_delayed_work(dev_priv->wq,
&dev_priv->gt.retire_work,
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 7ea84a191876..4b113cad40d1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -29,6 +29,40 @@ static const unsigned int user_engine_map[I915_NUM_ENGINES] = {
[I915_SAMPLE_VECS] = VECS,
};
+static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+{
+ if (gpu_active)
+ return i915->pmu.enable;
+ else
+ return i915->pmu.enable >> 32;
+}
+
+void i915_pmu_gt_idle(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->pmu.lock);
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
+ spin_unlock_irq(&i915->pmu.lock);
+}
+
+void i915_pmu_gt_active(struct drm_i915_private *i915)
+{
+ spin_lock_irq(&i915->pmu.lock);
+ /*
+ * Re-enable sampling timer when GPU goes active.
+ */
+ if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
+ hrtimer_start_range_ns(&i915->pmu.timer,
+ ns_to_ktime(PERIOD), 0,
+ HRTIMER_MODE_REL_PINNED);
+ i915->pmu.timer_enabled = true;
+ }
+ spin_unlock_irq(&i915->pmu.lock);
+}
+
static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
if (!fw)
@@ -133,7 +167,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
- if (i915->pmu.enable == 0)
+ if (!READ_ONCE(i915->pmu.timer_enabled))
return HRTIMER_NORESTART;
engines_sample(i915);
@@ -307,13 +341,19 @@ static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
- if (i915->pmu.enable == 0)
+ i915->pmu.enable |= BIT_ULL(event->attr.config);
+ if (pmu_needs_timer(i915, true) && !i915->pmu.timer_enabled) {
hrtimer_start_range_ns(&i915->pmu.timer,
ns_to_ktime(PERIOD), 0,
HRTIMER_MODE_REL_PINNED);
+ i915->pmu.timer_enabled = true;
+ }
- i915->pmu.enable |= BIT_ULL(event->attr.config);
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
i915_pmu_timer_start(event);
}
@@ -322,8 +362,13 @@ static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned long flags;
+ spin_lock_irqsave(&i915->pmu.lock, flags);
i915->pmu.enable &= ~BIT_ULL(event->attr.config);
+ i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+
i915_pmu_timer_cancel(event);
}
@@ -577,6 +622,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
i915->pmu.base.read = i915_pmu_event_read;
i915->pmu.base.event_idx = i915_pmu_event_event_idx;
+ spin_lock_init(&i915->pmu.lock);
hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
i915->pmu.timer.function = i915_sample;
i915->pmu.enable = 0;
--
2.9.4