[PATCH 47/54] drm/i915/gt: Enable busy-stats for ring-scheduler
Chris Wilson
chris at chris-wilson.co.uk
Sat Jan 23 16:54:13 UTC 2021
Couple the context in/out accounting together to record how long each engine
is busy handling requests. This is exposed to userspace for more accurate
busyness measurements, and also enables our soft-rps timer.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
.../gpu/drm/i915/gt/intel_ring_scheduler.c | 42 ++++++++++++++++++-
1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 372435a6849f..a0978f734e26 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -11,6 +11,7 @@
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
+#include "intel_engine_stats.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
@@ -43,6 +44,34 @@ set_current_context(struct intel_context **ptr, struct intel_context *ce)
intel_context_put(ce);
}
+static inline void runtime_start(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+
+ if (intel_context_is_barrier(ce))
+ return;
+
+ if (stats->active)
+ return;
+
+ WRITE_ONCE(stats->active, ktime_get());
+}
+
+static inline void runtime_stop(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+ ktime_t dt;
+
+ if (!stats->active)
+ return;
+
+ dt = ktime_sub(ktime_get(), stats->active);
+ ewma_runtime_add(&stats->runtime.avg, dt);
+ stats->runtime.total += dt;
+
+ WRITE_ONCE(stats->active, 0);
+}
+
static struct intel_engine_cs *__schedule_in(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
@@ -54,6 +83,8 @@ static struct intel_engine_cs *__schedule_in(struct i915_request *rq)
if (engine->fw_domain && !engine->fw_active++)
intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
+ intel_engine_context_in(engine);
+
CE_TRACE(ce, "schedule-in\n");
return engine;
@@ -87,6 +118,8 @@ static void __schedule_out(struct i915_request *rq)
else
i915_request_update_deadline(list_next_entry(rq, link));
+ intel_engine_context_out(engine);
+
if (engine->fw_domain && !--engine->fw_active)
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
intel_gt_pm_put_async(engine->gt);
@@ -236,6 +269,8 @@ static void dequeue(struct intel_engine_cs *engine)
*port++ = i915_request_get(last);
*port = NULL;
+ if (!*el->active)
+ runtime_start((*el->pending)->context);
WRITE_ONCE(el->active, el->pending);
copy_ports(el->inflight, el->pending, port - el->pending + 1);
@@ -267,11 +302,15 @@ process_csb(struct intel_engine_execlists *el, struct i915_request **inactive)
struct i915_request *rq;
while ((rq = *el->active)) {
- if (!__i915_request_is_complete(rq))
+ if (!__i915_request_is_complete(rq)) {
+ runtime_start(rq->context);
break;
+ }
*inactive++ = rq;
el->active++;
+
+ runtime_stop(rq->context);
}
return inactive;
@@ -744,6 +783,7 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_SCHEDULER;
engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
--
2.20.1
More information about the Intel-gfx-trybot
mailing list