[Intel-gfx] [RFC 5/7] drm/i915: Expose per-engine client busyness

Tvrtko Ursulin tursulin at ursulin.net
Tue Apr 17 12:27:34 UTC 2018


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

Expose per-client and per-engine busyness under the previously added sysfs
client root.

There is one new file per engine instance, located under the 'busy'
directory.

Each file contains a monotonically increasing, nanosecond resolution counter
of the total time that client's jobs have been executing on the respective
engine.

$ cat /sys/class/drm/card0/clients/5/busy/rcs0
32516602

This data can serve as the interface for implementing a top-like utility for
GPU jobs. For instance, I have prototyped a tool in IGT which produces
periodic output like:

neverball[  6011]:  rcs0:  41.01%  bcs0:   0.00%  vcs0:   0.00%  vecs0:   0.00%
     Xorg[  5664]:  rcs0:  31.16%  bcs0:   0.00%  vcs0:   0.00%  vecs0:   0.00%
    xfwm4[  5727]:  rcs0:   0.00%  bcs0:   0.00%  vcs0:   0.00%  vecs0:   0.00%

This tool can also be extended to use the i915 PMU to show overall engine
busyness, and engine loads using the queue depth metric.
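For reference, below is a minimal userspace sketch (not the IGT tool itself,
and not part of this patch) of how such a utility could consume these
counters: sample a busy file twice and divide the busy-time delta by the
elapsed wall time. The sysfs path is assumed to follow the layout shown
above.

/*
 * Hypothetical sketch: derive an engine utilisation percentage for one
 * client by sampling a busy counter twice, e.g.
 *   ./busy_pct /sys/class/drm/card0/clients/5/busy/rcs0
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static uint64_t read_busy_ns(const char *path)
{
	uint64_t val = 0;
	FILE *f = fopen(path, "r");

	/* The file contains a single cumulative nanosecond counter. */
	if (!f || fscanf(f, "%" SCNu64, &val) != 1)
		val = 0;
	if (f)
		fclose(f);

	return val;
}

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(int argc, char **argv)
{
	uint64_t busy0, busy1, t0, t1;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s <busy-file>\n", argv[0]);
		return 1;
	}

	/* Two samples one second apart; busy% = d_busy / d_wall. */
	busy0 = read_busy_ns(argv[1]);
	t0 = now_ns();
	sleep(1);
	busy1 = read_busy_ns(argv[1]);
	t1 = now_ns();

	printf("%.2f%%\n", 100.0 * (busy1 - busy0) / (t1 - t0));

	return 0;
}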

v2: Use intel_context_engine_get_busy_time.
v3: New directory structure.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  8 ++++
 drivers/gpu/drm/i915/i915_gem.c | 81 +++++++++++++++++++++++++++++++++++++++--
 2 files changed, 86 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 456dc495e017..f83b8dcac16b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -317,6 +317,12 @@ struct drm_i915_private;
 struct i915_mm_struct;
 struct i915_mmu_object;
 
+struct i915_engine_busy_attribute {
+	struct device_attribute attr;
+	struct drm_i915_file_private *file_priv;
+	struct intel_engine_cs *engine;
+};
+
 struct drm_i915_file_private {
 	struct drm_i915_private *dev_priv;
 	struct drm_file *file;
@@ -355,10 +361,12 @@ struct drm_i915_file_private {
 		char *name;
 
 		struct kobject *root;
+		struct kobject *busy_root;
 
 		struct {
 			struct device_attribute pid;
 			struct device_attribute name;
+			struct i915_engine_busy_attribute busy[I915_NUM_ENGINES];
 		} attr;
 	} client;
 };
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c20545e3539..6a550fb110fb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5728,6 +5728,38 @@ show_client_pid(struct device *kdev, struct device_attribute *attr, char *buf)
 	return snprintf(buf, PAGE_SIZE, "%u", file_priv->client.pid);
 }
 
+struct busy_ctx {
+	struct intel_engine_cs *engine;
+	u64 total;
+};
+
+static int busy_add(int _id, void *p, void *data)
+{
+	struct i915_gem_context *ctx = p;
+	struct busy_ctx *bc = data;
+
+	bc->total +=
+		ktime_to_ns(intel_context_engine_get_busy_time(ctx,
+							       bc->engine));
+
+	return 0;
+}
+
+static ssize_t
+show_client_busy(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct i915_engine_busy_attribute *i915_attr =
+		container_of(attr, typeof(*i915_attr), attr);
+	struct drm_i915_file_private *file_priv = i915_attr->file_priv;
+	struct busy_ctx bc = { .engine = i915_attr->engine };
+
+	rcu_read_lock();
+	idr_for_each(&file_priv->context_idr, busy_add, &bc);
+	rcu_read_unlock();
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", bc.total);
+}
+
 int
 i915_gem_add_client(struct drm_i915_private *i915,
 		struct drm_i915_file_private *file_priv,
@@ -5735,8 +5767,10 @@ i915_gem_add_client(struct drm_i915_private *i915,
 		unsigned int serial)
 {
 	int ret = -ENOMEM;
+	struct intel_engine_cs *engine;
 	struct device_attribute *attr;
-	char id[32];
+	enum intel_engine_id id, id2;
+	char idstr[32];
 
 	if (!i915->clients.root)
 		goto err_name;
@@ -5745,8 +5779,8 @@ i915_gem_add_client(struct drm_i915_private *i915,
 	if (!file_priv->client.name)
 		goto err_name;
 
-	snprintf(id, sizeof(id), "%u", serial);
-	file_priv->client.root = kobject_create_and_add(id,
+	snprintf(idstr, sizeof(idstr), "%u", serial);
+	file_priv->client.root = kobject_create_and_add(idstr,
 							i915->clients.root);
 	if (!file_priv->client.root)
 		goto err_client;
@@ -5771,10 +5805,41 @@ i915_gem_add_client(struct drm_i915_private *i915,
 	if (ret)
 		goto err_attr_pid;
 
+	file_priv->client.busy_root =
+			kobject_create_and_add("busy", file_priv->client.root);
+	if (!file_priv->client.busy_root)
+		goto err_busy_root;
+
+	for_each_engine(engine, i915, id) {
+		file_priv->client.attr.busy[id].file_priv = file_priv;
+		file_priv->client.attr.busy[id].engine = engine;
+		attr = &file_priv->client.attr.busy[id].attr;
+		attr->attr.name = engine->name;
+		attr->attr.mode = 0444;
+		attr->show = show_client_busy;
+
+		ret = sysfs_create_file(file_priv->client.busy_root,
+				        (struct attribute *)attr);
+		if (ret)
+			goto err_attr_busy;
+	}
+
 	file_priv->client.pid = pid_nr(get_task_pid(task, PIDTYPE_PID));
 
 	return 0;
 
+err_attr_busy:
+	for_each_engine(engine, i915, id2) {
+		if (id2 == id)
+			break;
+
+		sysfs_remove_file(file_priv->client.busy_root,
+				  (struct attribute *)&file_priv->client.attr.busy[id2]);
+	}
+	kobject_put(file_priv->client.busy_root);
+err_busy_root:
+	sysfs_remove_file(file_priv->client.root,
+			  (struct attribute *)&file_priv->client.attr.pid);
 err_attr_pid:
 	sysfs_remove_file(file_priv->client.root,
 			  (struct attribute *)&file_priv->client.attr.name);
@@ -5788,10 +5853,20 @@ i915_gem_add_client(struct drm_i915_private *i915,
 
 void i915_gem_remove_client(struct drm_i915_file_private *file_priv)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, file_priv->dev_priv, id)
+		sysfs_remove_file(file_priv->client.busy_root,
+				  (struct attribute *)&file_priv->client.attr.busy[id]);
+
+	kobject_put(file_priv->client.busy_root);
+
 	sysfs_remove_file(file_priv->client.root,
 			  (struct attribute *)&file_priv->client.attr.pid);
 	sysfs_remove_file(file_priv->client.root,
 			  (struct attribute *)&file_priv->client.attr.name);
+
 	kobject_put(file_priv->client.root);
 	kfree(file_priv->client.name);
 }
-- 
2.14.1


