[PATCH 14/15] drm/i915: Export per-client debug tracing

Chris Wilson <chris@chris-wilson.co.uk>
Tue Jan 19 00:31:23 UTC 2021


Rather than putting sensitive, and often voluminous, user details into
the global dmesg log, report the error and debug messages directly back
to the offending client via the kernel tracing mechanism. Each client
now owns a private trace_array, exposed as a 'trace' file in its sysfs
directory, and the user-triggerable drm_dbg() messages along the
context-creation and execbuf paths are redirected there (or dropped
where they carried no useful information).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
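
A rough sketch of how the per-client trace plumbing below fits together
(illustration only, not the literal driver code: the condensed struct and
the client_trace_register()/client_trace_unregister() helpers are made-up
names for this example, trace_create_kernfs() and the no-argument
trace_array_create() are assumed from the tracing changes earlier in this
series, and error handling is elided much as in the patch itself):

#include <linux/trace.h>

/* One private ftrace instance per client, created when it registers. */
struct i915_drm_client {
	struct kobject *root;		/* the client's sysfs directory */
	struct trace_array *trace;	/* per-client debug/error buffer */
};

/* Client-triggered debug/error messages go to that client's buffer. */
#define TRACE(c, ...) \
	trace_array_printk((c)->trace, _THIS_IP_, __VA_ARGS__)

static int client_trace_register(struct i915_drm_client *client)
{
	client->trace = trace_array_create();

	/* Expose the buffer as a root-only 'trace' file in sysfs. */
	return trace_create_kernfs(client->root->sd, "trace", 0400,
				   GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				   client->trace);
}

static void client_trace_unregister(struct i915_drm_client *client)
{
	trace_array_destroy(client->trace);
}

So, for example, a malformed execbuf now reports "execbuf with unknown
ring: ..." into the offending client's own 'trace' file instead of into
the system-wide dmesg.
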
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  71 ++++++-----
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 120 ++++++++++--------
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     |   6 +-
 drivers/gpu/drm/i915/i915_drm_client.c        |  13 ++
 drivers/gpu/drm/i915/i915_drm_client.h        |   4 +
 drivers/gpu/drm/i915/i915_gem.c               |   2 -
 6 files changed, 123 insertions(+), 93 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 1a0f96ca7962..f843dab15174 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -84,6 +84,8 @@
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
+#define CTX_TRACE(ctx, ...) TRACE((ctx)->file_priv->client, __VA_ARGS__)
+
 static struct i915_global_gem_context {
 	struct i915_global base;
 	struct kmem_cache *slab_luts;
@@ -157,8 +159,12 @@ lookup_user_engine(struct i915_gem_context *ctx,
 		engine = intel_engine_lookup_user(ctx->i915,
 						  ci->engine_class,
 						  ci->engine_instance);
-		if (!engine)
+		if (!engine) {
+			CTX_TRACE(ctx,
+				  "Unknown engine {class:%d, instance:%d}\n",
+				  ci->engine_class, ci->engine_instance);
 			return ERR_PTR(-EINVAL);
+		}
 
 		idx = engine->legacy_idx;
 	} else {
@@ -870,8 +876,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 
 		ppgtt = i915_ppgtt_create(&i915->gt);
 		if (IS_ERR(ppgtt)) {
-			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
-				PTR_ERR(ppgtt));
 			context_close(ctx);
 			return ERR_CAST(ppgtt);
 		}
@@ -1609,15 +1613,16 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 		return -EFAULT;
 
 	if (idx >= set->engines->num_engines) {
-		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
-			idx, set->engines->num_engines);
+		CTX_TRACE(set->ctx,
+			  "Invalid placement value, %d >= %d\n",
+			  idx, set->engines->num_engines);
 		return -EINVAL;
 	}
 
 	idx = array_index_nospec(idx, set->engines->num_engines);
 	if (set->engines->engines[idx]) {
-		drm_dbg(&i915->drm,
-			"Invalid placement[%d], already occupied\n", idx);
+		CTX_TRACE(set->ctx,
+			  "Invalid placement[%d], already occupied\n", idx);
 		return -EEXIST;
 	}
 
@@ -1653,9 +1658,9 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 						       ci.engine_class,
 						       ci.engine_instance);
 		if (!siblings[n]) {
-			drm_dbg(&i915->drm,
-				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
-				n, ci.engine_class, ci.engine_instance);
+			CTX_TRACE(set->ctx,
+				  "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+				  n, ci.engine_class, ci.engine_instance);
 			err = -EINVAL;
 			goto out_siblings;
 		}
@@ -1699,15 +1704,15 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
 		return -EFAULT;
 
 	if (idx >= set->engines->num_engines) {
-		drm_dbg(&i915->drm,
-			"Invalid index for virtual engine: %d >= %d\n",
-			idx, set->engines->num_engines);
+		CTX_TRACE(set->ctx,
+			  "Invalid index for virtual engine: %d >= %d\n",
+			  idx, set->engines->num_engines);
 		return -EINVAL;
 	}
 
 	idx = array_index_nospec(idx, set->engines->num_engines);
 	if (!set->engines->engines[idx]) {
-		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
+		CTX_TRACE(set->ctx, "Invalid engine at %d\n", idx);
 		return -EINVAL;
 	}
 	virtual = set->engines->engines[idx]->engine;
@@ -1728,9 +1733,9 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
 	master = intel_engine_lookup_user(i915,
 					  ci.engine_class, ci.engine_instance);
 	if (!master) {
-		drm_dbg(&i915->drm,
-			"Unrecognised master engine: { class:%u, instance:%u }\n",
-			ci.engine_class, ci.engine_instance);
+		CTX_TRACE(set->ctx,
+			  "Unrecognised master engine: { class:%u, instance:%u }\n",
+			  ci.engine_class, ci.engine_instance);
 		return -EINVAL;
 	}
 
@@ -1747,9 +1752,9 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
 						ci.engine_class,
 						ci.engine_instance);
 		if (!bond) {
-			drm_dbg(&i915->drm,
-				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
-				n, ci.engine_class, ci.engine_instance);
+			CTX_TRACE(set->ctx,
+				  "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+				  n, ci.engine_class, ci.engine_instance);
 			return -EINVAL;
 		}
 
@@ -1778,7 +1783,6 @@ static int
 set_engines(struct i915_gem_context *ctx,
 	    const struct drm_i915_gem_context_param *args)
 {
-	struct drm_i915_private *i915 = ctx->i915;
 	struct i915_context_param_engines __user *user =
 		u64_to_user_ptr(args->value);
 	struct set_engines set = { .ctx = ctx };
@@ -1800,8 +1804,9 @@ set_engines(struct i915_gem_context *ctx,
 	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
 	if (args->size < sizeof(*user) ||
 	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
-		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
-			args->size);
+		CTX_TRACE(ctx,
+			  "Invalid size for engine array: %d\n",
+			  args->size);
 		return -EINVAL;
 	}
 
@@ -1834,9 +1839,9 @@ set_engines(struct i915_gem_context *ctx,
 						  ci.engine_class,
 						  ci.engine_instance);
 		if (!engine) {
-			drm_dbg(&i915->drm,
-				"Invalid engine[%d]: { class:%d, instance:%d }\n",
-				n, ci.engine_class, ci.engine_instance);
+			CTX_TRACE(ctx,
+				  "Invalid engine[%d]: { class:%d, instance:%d }\n",
+				  n, ci.engine_class, ci.engine_instance);
 			__free_engines(set.engines, n);
 			return -ENOENT;
 		}
@@ -1993,8 +1998,13 @@ static int set_priority(struct i915_gem_context *ctx,
 		return -ENODEV;
 
 	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
-	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
+	    priority < I915_CONTEXT_MIN_USER_PRIORITY) {
+		CTX_TRACE(ctx, "priority %d out-of-range [%d, %d]\n",
+			  priority,
+			  I915_CONTEXT_MIN_USER_PRIORITY,
+			  I915_CONTEXT_MAX_USER_PRIORITY);
 		return -EINVAL;
+	}
 
 	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
 	    !capable(CAP_SYS_NICE))
@@ -2350,9 +2360,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 
 	ext_data.fpriv = file->driver_priv;
 	if (client_is_banned(ext_data.fpriv)) {
-		drm_dbg(&i915->drm,
-			"client %s[%d] banned from creating ctx\n",
-			current->comm, task_pid_nr(current));
+		TRACE(ext_data.fpriv->client,
+		      "banned from creating new contexts\n");
 		return -EIO;
 	}
 
@@ -2378,7 +2387,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 		goto err_ctx;
 
 	args->ctx_id = id;
-	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
+	TRACE(ext_data.fpriv->client, "HW context %d created\n", args->ctx_id);
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b91b32195dcf..f8dc8d1f45e9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -310,6 +310,16 @@ static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb,
 					  bool throttle);
 static void eb_unpin_engine(struct i915_execbuffer *eb);
 
+static inline struct i915_drm_client *file_client(struct drm_file *f)
+{
+	struct drm_i915_file_private *fp = f->driver_priv;
+
+	return fp->client;
+}
+
+#define FILE_TRACE(F, ...) TRACE(file_client(F), __VA_ARGS__)
+#define EB_TRACE(E, ...) FILE_TRACE((E)->file, __VA_ARGS__)
+
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
 	return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -505,6 +515,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	} else {
 		entry->pad_to_size = 0;
 	}
+
 	/*
 	 * From drm_mm perspective address space is continuous,
 	 * so from this point we're always using non-canonical
@@ -830,7 +841,6 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
 
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
-	struct drm_i915_private *i915 = eb->i915;
 	unsigned int batch = eb_batch_index(eb);
 	unsigned int i;
 	int err = 0;
@@ -856,22 +866,21 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	}
 
 	if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
-		drm_dbg(&i915->drm,
-			"Attempting to use self-modifying batch buffer\n");
+		EB_TRACE(eb, "Attempting to use self-modifying batch buffer\n");
 		return -EINVAL;
 	}
 
 	if (range_overflows_t(u64,
 			      eb->batch_start_offset, eb->batch_len,
 			      eb->batch->vma->size)) {
-		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
+		EB_TRACE(eb, "Attempting to use out-of-bounds batch\n");
 		return -EINVAL;
 	}
 
 	if (eb->batch_len == 0)
 		eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
 	if (unlikely(eb->batch_len == 0)) { /* impossible! */
-		drm_dbg(&i915->drm, "Invalid batch length\n");
+		EB_TRACE(eb, "Invalid batch length\n");
 		return -EINVAL;
 	}
 
@@ -1573,7 +1582,6 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 		  struct eb_vma *ev,
 		  const struct drm_i915_gem_relocation_entry *reloc)
 {
-	struct drm_i915_private *i915 = eb->i915;
 	struct eb_vma *target;
 	int err;
 
@@ -1584,24 +1592,26 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
-		drm_dbg(&i915->drm, "reloc with multiple write domains: "
-			  "target %d offset %d "
-			  "read %08x write %08x",
-			  reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->read_domains,
-			  reloc->write_domain);
+		EB_TRACE(eb,
+			 "reloc with multiple write domains: "
+			 "target %d offset %d "
+			 "read %08x write %08x",
+			 reloc->target_handle,
+			 (int)reloc->offset,
+			 reloc->read_domains,
+			 reloc->write_domain);
 		return -EINVAL;
 	}
 	if (unlikely((reloc->write_domain | reloc->read_domains)
 		     & ~I915_GEM_GPU_DOMAINS)) {
-		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
-			  "target %d offset %d "
-			  "read %08x write %08x",
-			  reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->read_domains,
-			  reloc->write_domain);
+		EB_TRACE(eb,
+			 "reloc with read/write non-GPU domains: "
+			 "target %d offset %d "
+			 "read %08x write %08x",
+			 reloc->target_handle,
+			 (int)reloc->offset,
+			 reloc->read_domains,
+			 reloc->write_domain);
 		return -EINVAL;
 	}
 
@@ -1635,18 +1645,20 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 	/* Check that the relocation address is valid... */
 	if (unlikely(reloc->offset >
 		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
-		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
-			  "target %d offset %d size %d.\n",
-			  reloc->target_handle,
-			  (int)reloc->offset,
-			  (int)ev->vma->size);
+		EB_TRACE(eb,
+			 "Relocation beyond object bounds: "
+			 "target %d offset %d size %d.\n",
+			 reloc->target_handle,
+			 (int)reloc->offset,
+			 (int)ev->vma->size);
 		return -EINVAL;
 	}
 	if (unlikely(reloc->offset & 3)) {
-		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
-			  "target %d offset %d.\n",
-			  reloc->target_handle,
-			  (int)reloc->offset);
+		EB_TRACE(eb,
+			 "Relocation not 4-byte aligned: "
+			 "target %d offset %d.\n",
+			 reloc->target_handle,
+			 (int)reloc->offset);
 		return -EINVAL;
 	}
 
@@ -2221,13 +2233,14 @@ static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 	return 0;
 }
 
-static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
+static int gen7_sol_reset(struct i915_execbuffer *eb)
 {
+	struct i915_request *rq = eb->request;
 	u32 *cs;
 	int i;
 
 	if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
-		drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
+		EB_TRACE(eb, "sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
@@ -2426,7 +2439,6 @@ static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i9
 
 static int eb_parse(struct i915_execbuffer *eb)
 {
-	struct drm_i915_private *i915 = eb->i915;
 	struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
 	struct i915_vma *shadow, *trampoline, *batch;
 	unsigned long len;
@@ -2447,8 +2459,7 @@ static int eb_parse(struct i915_execbuffer *eb)
 		 * post-scan tampering
 		 */
 		if (!eb->context->vm->has_read_only) {
-			drm_dbg(&i915->drm,
-				"Cannot prevent post-scan tampering without RO capable vm\n");
+			EB_TRACE(eb, "Cannot prevent post-scan tampering without RO capable vm\n");
 			return -EINVAL;
 		}
 	} else {
@@ -2542,7 +2553,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 		return err;
 
 	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		err = i915_reset_gen7_sol_offsets(eb->request);
+		err = gen7_sol_reset(eb);
 		if (err)
 			return err;
 	}
@@ -2715,9 +2726,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb)
 
 	if (user_ring_id != I915_EXEC_BSD &&
 	    (args->flags & I915_EXEC_BSD_MASK)) {
-		drm_dbg(&i915->drm,
-			"execbuf with non bsd ring but with invalid "
-			"bsd dispatch flags: %d\n", (int)(args->flags));
+		EB_TRACE(eb,
+			 "execbuf with non bsd ring but with invalid "
+			 "bsd dispatch flags: %d\n", (int)(args->flags));
 		return -1;
 	}
 
@@ -2731,9 +2742,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb)
 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
 			bsd_idx--;
 		} else {
-			drm_dbg(&i915->drm,
-				"execbuf with unknown bsd ring: %u\n",
-				bsd_idx);
+			EB_TRACE(eb,
+				 "execbuf with unknown bsd ring: %u\n",
+				 bsd_idx);
 			return -1;
 		}
 
@@ -2741,8 +2752,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb)
 	}
 
 	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
-		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
-			user_ring_id);
+		EB_TRACE(eb, "execbuf with unknown ring: %u\n",
+			 user_ring_id);
 		return -1;
 	}
 
@@ -3408,7 +3419,6 @@ int
 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file)
 {
-	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_execbuffer *args = data;
 	struct drm_i915_gem_execbuffer2 exec2;
 	struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -3418,7 +3428,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	int err;
 
 	if (!check_buffer_count(count)) {
-		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
+		FILE_TRACE(file, "execbuf with %zd buffers\n", count);
 		return -EINVAL;
 	}
 
@@ -3445,9 +3455,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 	exec2_list = kvmalloc_array(count + 2, eb_element_size(),
 				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec_list == NULL || exec2_list == NULL) {
-		drm_dbg(&i915->drm,
-			"Failed to allocate exec list for %d buffers\n",
-			args->buffer_count);
+		FILE_TRACE(file,
+			   "Failed to allocate exec list for %d buffers\n",
+			   args->buffer_count);
 		kvfree(exec_list);
 		kvfree(exec2_list);
 		return -ENOMEM;
@@ -3456,8 +3466,8 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
 			     u64_to_user_ptr(args->buffers_ptr),
 			     sizeof(*exec_list) * count);
 	if (err) {
-		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
-			args->buffer_count, err);
+		FILE_TRACE(file, "copy %d exec entries failed %d\n",
+			   args->buffer_count, err);
 		kvfree(exec_list);
 		kvfree(exec2_list);
 		return -EFAULT;
@@ -3504,14 +3514,13 @@ int
 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file)
 {
-	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_execbuffer2 *args = data;
 	struct drm_i915_gem_exec_object2 *exec2_list;
 	const size_t count = args->buffer_count;
 	int err;
 
 	if (!check_buffer_count(count)) {
-		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
+		FILE_TRACE(file, "execbuf2 with %zd buffers\n", count);
 		return -EINVAL;
 	}
 
@@ -3523,14 +3532,15 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 	exec2_list = kvmalloc_array(count + 2, eb_element_size(),
 				    __GFP_NOWARN | GFP_KERNEL);
 	if (exec2_list == NULL) {
-		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
-			count);
+		FILE_TRACE(file,
+			   "Failed to allocate exec list for %zd buffers\n",
+			   count);
 		return -ENOMEM;
 	}
 	if (copy_from_user(exec2_list,
 			   u64_to_user_ptr(args->buffers_ptr),
 			   sizeof(*exec2_list) * count)) {
-		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
+		FILE_TRACE(file, "copy %zd exec entries failed\n", count);
 		kvfree(exec2_list);
 		return -EFAULT;
 	}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 3db3c667c486..3f4c46f373c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -85,14 +85,10 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	int err;
 
-	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
-		drm_dbg(&i915->drm,
-			"Attempting to obtain a purgeable object\n");
+	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED))
 		return -EFAULT;
-	}
 
 	err = obj->ops->get_pages(obj);
 	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index a6ead3eb41f8..c2ca3ff8b965 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/trace.h>
 
 #include <drm/drm_print.h>
 
@@ -315,6 +316,13 @@ static void __client_unregister_sysfs(struct i915_drm_client *client)
 	kobject_put(fetch_and_zero(&client->root));
 }
 
+static int __client_register_trace(struct i915_drm_client *client)
+{
+	return trace_create_kernfs(client->root->sd, "trace",
+				   0400, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+				   client->trace);
+}
+
 static struct i915_drm_client_name *get_name(struct i915_drm_client *client,
 					     struct task_struct *task)
 {
@@ -366,6 +374,9 @@ __i915_drm_client_register(struct i915_drm_client *client,
 	if (ret)
 		goto err_sysfs;
 
+	client->trace = trace_array_create();
+	__client_register_trace(client);
+
 	return 0;
 
 err_sysfs:
@@ -389,6 +400,8 @@ static void __i915_drm_client_unregister(struct i915_drm_client *client)
 	hash_del_rcu(&name->node);
 	spin_unlock(&clients->pid_lock);
 
+	trace_array_destroy(client->trace);
+
 	call_rcu(&name->rcu, free_name);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 6b8d05e9c838..5e2175a54b06 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
+#include <linux/trace.h>
 #include <linux/xarray.h>
 
 #include "gt/intel_engine_types.h"
@@ -58,6 +59,8 @@ struct i915_drm_client {
 	struct i915_drm_client_name __rcu *name;
 	bool closed;
 
+	struct trace_array *trace; /* Debug/error messages for this client. */
+
 	spinlock_t ctx_lock; /* For add/remove from ctx_list. */
 	struct list_head ctx_list; /* List of contexts belonging to client. */
 
@@ -76,6 +79,7 @@ struct i915_drm_client {
 	 */
 	atomic64_t past_runtime[MAX_ENGINE_CLASS + 1];
 };
+#define TRACE(c, ...) trace_array_printk((c)->trace, _THIS_IP_, __VA_ARGS__)
 
 void i915_drm_clients_init(struct i915_drm_clients *clients,
 			   struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f7c9b3413d8e..9acee02fd587 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1200,8 +1200,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 	struct i915_drm_client *client;
 	int ret = -ENOMEM;
 
-	DRM_DEBUG("\n");
-
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
 	if (!file_priv)
 		goto err_alloc;
-- 
2.20.1


