[Intel-gfx] [PATCH 4/7] drm/i915: Track runtime spent in closed and unreachable GEM contexts
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Thu May 13 10:59:59 UTC 2021
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
As contexts are abandoned we want to remember how much GPU time they used
(per class) so later we can use it for smarter purposes.

As GEM contexts are closed we want the DRM client to remember how much
GPU time they used (per class) so later we can use it for smarter
purposes.
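For illustration only (not part of this patch; the helper name is made
up): once the totals are banked in client->past_runtime[], a later
reporting path could read the per-class figure back out roughly like the
sketch below, summing the runtimes of still-open contexts on top.

static u64 client_class_runtime_ns(struct i915_drm_client *client,
				   unsigned int class)
{
	/*
	 * Sketch only: return the runtime banked from contexts which have
	 * already been closed; live contexts would be accounted separately
	 * by whatever consumes this value.
	 */
	if (GEM_WARN_ON(class > MAX_ENGINE_CLASS))
		return 0;

	return atomic64_read(&client->past_runtime[class]);
}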
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Reviewed-by: Aravind Iddamsetty <aravind.iddamsetty at intel.com>
Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20210123153733.18139-5-chris@chris-wilson.co.uk
Link: https://patchwork.freedesktop.org/patch/msgid/20210124153136.19124-5-chris@chris-wilson.co.uk
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 24 +++++++++++++++++++--
drivers/gpu/drm/i915/i915_drm_client.h | 7 ++++++
2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 5ea42d5b0b1a..b8d8366a2cce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -262,23 +262,43 @@ static void free_engines_rcu(struct rcu_head *rcu)
 	free_engines(engines);
 }
 
+static void accumulate_runtime(struct i915_drm_client *client,
+			       struct i915_gem_engines *engines)
+{
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+
+	if (!client)
+		return;
+
+	/* Transfer accumulated runtime to the parent GEM context. */
+	for_each_gem_engine(ce, engines, it) {
+		unsigned int class = ce->engine->uabi_class;
+
+		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
+		atomic64_add(intel_context_get_total_runtime_ns(ce),
+			     &client->past_runtime[class]);
+	}
+}
+
 static int __i915_sw_fence_call
 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
 	struct i915_gem_engines *engines =
 		container_of(fence, typeof(*engines), fence);
+	struct i915_gem_context *ctx = engines->ctx;
 
 	switch (state) {
 	case FENCE_COMPLETE:
 		if (!list_empty(&engines->link)) {
-			struct i915_gem_context *ctx = engines->ctx;
 			unsigned long flags;
 
 			spin_lock_irqsave(&ctx->stale.lock, flags);
 			list_del(&engines->link);
 			spin_unlock_irqrestore(&ctx->stale.lock, flags);
 		}
-		i915_gem_context_put(engines->ctx);
+		accumulate_runtime(ctx->client, engines);
+		i915_gem_context_put(ctx);
 		break;
 
 	case FENCE_FREE:
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 556a59d6b834..6f25e754e978 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -15,6 +15,8 @@
 #include <linux/sched.h>
 #include <linux/xarray.h>
 
+#include "gt/intel_engine_types.h"
+
 struct drm_i915_private;
 
 struct i915_drm_clients {
@@ -51,6 +53,11 @@ struct i915_drm_client {
 		struct device_attribute pid;
 		struct device_attribute name;
 	} attr;
+
+	/**
+	 * @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
+	 */
+	atomic64_t past_runtime[MAX_ENGINE_CLASS + 1];
 };
 
 void i915_drm_clients_init(struct i915_drm_clients *clients,
--
2.30.2