[Intel-gfx] [CI 9/9] drm/i915: Prefer software tracked context busyness
Chris Wilson
chris at chris-wilson.co.uk
Fri Jan 22 12:29:03 UTC 2021
From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
When available prefer software tracked context busyness because it provides
visibility into currently executing contexts as well.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Reviewed-by: Aravind Iddamsetty <aravind.iddamsetty at intel.com>
Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_drm_client.c | 64 ++++++++++++++++++++++++--
1 file changed, 59 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index 5c5ba90e2f8e..bd49ec7369d9 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -96,6 +96,58 @@ show_client_busy(struct device *kdev, struct device_attribute *attr, char *buf)
return snprintf(buf, PAGE_SIZE, "%llu\n", total);
}
+/*
+ * sw_busy_add - accumulate software tracked busyness for one engine class
+ * of a single GEM context.
+ *
+ * Caller must hold rcu_read_lock(): ctx->engines is rcu_dereference'd and
+ * the engine list walked without further locking.  For each engine of the
+ * requested uabi class, the saved runtime (in clock ticks, converted to
+ * nanoseconds via gt.clock_period_ns) plus the currently-active time is
+ * sampled under the per-context stats seqlock so a torn read of
+ * total/active is retried rather than summed inconsistently.
+ */
+static u64 sw_busy_add(struct i915_gem_context *ctx, unsigned int class)
+{
+ struct i915_gem_engines *engines = rcu_dereference(ctx->engines);
+ u32 period_ns = ctx->i915->gt.clock_period_ns;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ u64 total = 0;
+
+ for_each_gem_engine(ce, engines, it) {
+ struct intel_context_stats *stats;
+ unsigned int seq;
+ u64 t;
+
+ if (ce->engine->uabi_class != class)
+ continue;
+
+ stats = &ce->stats;
+
+ /* Seqlock read side: retry until a consistent snapshot is read. */
+ do {
+ seq = read_seqbegin(&stats->lock);
+ /*
+ * NOTE(review): reads ce->stats.runtime.total directly even
+ * though the 'stats' alias was just taken; stats->runtime.total
+ * would be more consistent (same object either way).
+ */
+ t = ce->stats.runtime.total * period_ns;
+ t += __intel_context_get_active_time(ce);
+ } while (read_seqretry(&stats->lock, seq));
+
+ total += t;
+ }
+
+ return total;
+}
+
+/*
+ * show_client_sw_busy - sysfs ->show() for per-client, per-engine-class
+ * busyness using software (context) tracked runtimes.
+ *
+ * Starts from past_runtime[class] (runtime banked by contexts that have
+ * already been closed, in clock ticks, converted to ns), then adds the
+ * live contribution of every context still on the client's ctx_list.
+ * The list is walked under RCU, which also satisfies sw_busy_add()'s
+ * locking requirement.  Output is a single u64 nanosecond value.
+ */
+static ssize_t
+show_client_sw_busy(struct device *kdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i915_engine_busy_attribute *i915_attr =
+ container_of(attr, typeof(*i915_attr), attr);
+ unsigned int class = i915_attr->engine_class;
+ struct i915_drm_client *client = i915_attr->client;
+ const u32 period_ns = client->clients->i915->gt.clock_period_ns;
+ u64 total = atomic64_read(&client->past_runtime[class]) * period_ns;
+ struct list_head *list = &client->ctx_list;
+ struct i915_gem_context *ctx;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ctx, list, client_link)
+ total += sw_busy_add(ctx, class);
+ rcu_read_unlock();
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", total);
+}
+
static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "0",
[I915_ENGINE_CLASS_COPY] = "1",
@@ -106,6 +158,8 @@ static const char * const uabi_class_names[] = {
static int __client_register_sysfs_busy(struct i915_drm_client *client)
{
struct i915_drm_clients *clients = client->clients;
+ bool sw_stats = clients->i915->caps.scheduler &
+ I915_SCHEDULER_CAP_ENGINE_BUSY_STATS;
unsigned int i;
int ret = 0;
@@ -131,18 +185,18 @@ static int __client_register_sysfs_busy(struct i915_drm_client *client)
attr->attr.name = uabi_class_names[i];
attr->attr.mode = 0444;
- attr->show = show_client_busy;
+ attr->show = sw_stats ? show_client_sw_busy : show_client_busy;
ret = sysfs_create_file(client->busy_root,
(struct attribute *)attr);
if (ret)
- goto err;
+ goto out;
}
- return 0;
+out:
+ if (ret)
+ kobject_put(client->busy_root);
-err:
- kobject_put(client->busy_root);
return ret;
}
--
2.20.1
More information about the Intel-gfx
mailing list