[Intel-gfx] [PATCH 1/3] drm/i915/trace: Describe engines as class:instance pairs
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Tue Jun 5 13:43:36 UTC 2018
On 05/06/2018 10:41, Lionel Landwerlin wrote:
> Any update on this series?
>
> (my comments were a bit fuzzy, but I gave Rb on patch 1 & 2).
Forgot about it for a bit. Just sent updated 3/3.
Was your r-b for 2/3 for v1 or for the v2 at the end?
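
As an aside, for anyone scraping these tracepoints from userspace, splitting the
new engine=class:instance field is straightforward. A minimal sketch (not part of
the patch; the helper name is made up):

#include <stdio.h>

/*
 * Illustrative only: pull class:instance out of the "engine=%u:%u"
 * field emitted by the updated TP_printk format strings.
 */
static int parse_engine_field(const char *field,
                              unsigned int *class,
                              unsigned int *instance)
{
        return sscanf(field, "engine=%u:%u", class, instance) == 2 ? 0 : -1;
}

int main(void)
{
        unsigned int class, instance;

        if (!parse_engine_field("engine=1:0", &class, &instance))
                printf("class=%u instance=%u\n", class, instance);

        return 0;
}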
Regards,
Tvrtko
>
> Cheers,
>
> -
> Lionel
>
> On 25/05/18 09:26, Tvrtko Ursulin wrote:
>> From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>>
>> Instead of using the engine->id, use uabi_class:instance pairs in trace-
>> points including engine info.
>>
>> This will be more readable, more future proof and more stable for
>> userspace consumption.
>>
>> v2:
>> * Use u16 for class and instance. (Chris Wilson)
>>
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
>> Cc: Chris Wilson <chris at chris-wilson.co.uk>
>> Cc: svetlana.kukanova at intel.com
>> Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
>> ---
>> drivers/gpu/drm/i915/i915_trace.h | 107 ++++++++++++++++++------------
>> 1 file changed, 65 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
>> index 5d4f78765083..7acea4052798 100644
>> --- a/drivers/gpu/drm/i915/i915_trace.h
>> +++ b/drivers/gpu/drm/i915/i915_trace.h
>> @@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
>> TP_STRUCT__entry(
>> __field(u32, dev)
>> - __field(u32, sync_from)
>> - __field(u32, sync_to)
>> + __field(u32, from_class)
>> + __field(u32, from_instance)
>> + __field(u32, to_class)
>> + __field(u32, to_instance)
>> __field(u32, seqno)
>> ),
>> TP_fast_assign(
>> __entry->dev = from->i915->drm.primary->index;
>> - __entry->sync_from = from->engine->id;
>> - __entry->sync_to = to->engine->id;
>> + __entry->from_class = from->engine->uabi_class;
>> + __entry->from_instance = from->engine->instance;
>> + __entry->to_class = to->engine->uabi_class;
>> + __entry->to_instance = to->engine->instance;
>> __entry->seqno = from->global_seqno;
>> ),
>> - TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
>> + TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
>> __entry->dev,
>> - __entry->sync_from, __entry->sync_to,
>> + __entry->from_class, __entry->from_instance,
>> + __entry->to_class, __entry->to_instance,
>> __entry->seqno)
>> );
>> @@ -616,7 +621,8 @@ TRACE_EVENT(i915_request_queue,
>> TP_STRUCT__entry(
>> __field(u32, dev)
>> __field(u32, hw_id)
>> - __field(u32, ring)
>> + __field(u16, class)
>> + __field(u16, instance)
>> __field(u32, ctx)
>> __field(u32, seqno)
>> __field(u32, flags)
>> @@ -625,15 +631,17 @@ TRACE_EVENT(i915_request_queue,
>> TP_fast_assign(
>> __entry->dev = rq->i915->drm.primary->index;
>> __entry->hw_id = rq->gem_context->hw_id;
>> - __entry->ring = rq->engine->id;
>> + __entry->class = rq->engine->uabi_class;
>> + __entry->instance = rq->engine->instance;
>> __entry->ctx = rq->fence.context;
>> __entry->seqno = rq->fence.seqno;
>> __entry->flags = flags;
>> ),
>> - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
>> - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
>> - __entry->seqno, __entry->flags)
>> + TP_printk("dev=%u, hw_id=%u, engine=%u:%u, ctx=%u, seqno=%u, flags=0x%x",
>> + __entry->dev, __entry->hw_id, __entry->class,
>> + __entry->instance, __entry->ctx, __entry->seqno,
>> + __entry->flags)
>> );
>> DECLARE_EVENT_CLASS(i915_request,
>> @@ -643,7 +651,8 @@ DECLARE_EVENT_CLASS(i915_request,
>> TP_STRUCT__entry(
>> __field(u32, dev)
>> __field(u32, hw_id)
>> - __field(u32, ring)
>> + __field(u16, class)
>> + __field(u16, instance)
>> __field(u32, ctx)
>> __field(u32, seqno)
>> __field(u32, global)
>> @@ -652,15 +661,17 @@ DECLARE_EVENT_CLASS(i915_request,
>> TP_fast_assign(
>> __entry->dev = rq->i915->drm.primary->index;
>> __entry->hw_id = rq->gem_context->hw_id;
>> - __entry->ring = rq->engine->id;
>> + __entry->class = rq->engine->uabi_class;
>> + __entry->instance = rq->engine->instance;
>> __entry->ctx = rq->fence.context;
>> __entry->seqno = rq->fence.seqno;
>> __entry->global = rq->global_seqno;
>> ),
>> - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
>> - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
>> - __entry->seqno, __entry->global)
>> + TP_printk("dev=%u, hw_id=%u, engine=%u:%u, ctx=%u, seqno=%u, global=%u",
>> + __entry->dev, __entry->hw_id, __entry->class,
>> + __entry->instance, __entry->ctx, __entry->seqno,
>> + __entry->global)
>> );
>> DEFINE_EVENT(i915_request, i915_request_add,
>> @@ -686,7 +697,8 @@ TRACE_EVENT(i915_request_in,
>> TP_STRUCT__entry(
>> __field(u32, dev)
>> __field(u32, hw_id)
>> - __field(u32, ring)
>> + __field(u16, class)
>> + __field(u16, instance)
>> __field(u32, ctx)
>> __field(u32, seqno)
>> __field(u32, global_seqno)
>> @@ -697,7 +709,8 @@ TRACE_EVENT(i915_request_in,
>> TP_fast_assign(
>> __entry->dev = rq->i915->drm.primary->index;
>> __entry->hw_id = rq->gem_context->hw_id;
>> - __entry->ring = rq->engine->id;
>> + __entry->class = rq->engine->uabi_class;
>> + __entry->instance = rq->engine->instance;
>> __entry->ctx = rq->fence.context;
>> __entry->seqno = rq->fence.seqno;
>> __entry->global_seqno = rq->global_seqno;
>> @@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
>> __entry->port = port;
>> ),
>> - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
>> - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
>> - __entry->seqno, __entry->prio, __entry->global_seqno,
>> - __entry->port)
>> + TP_printk("dev=%u, hw_id=%u, engine=%u:%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
>> + __entry->dev, __entry->hw_id, __entry->class,
>> + __entry->instance, __entry->ctx, __entry->seqno,
>> + __entry->prio, __entry->global_seqno, __entry->port)
>> );
>> TRACE_EVENT(i915_request_out,
>> @@ -718,7 +731,8 @@ TRACE_EVENT(i915_request_out,
>> TP_STRUCT__entry(
>> __field(u32, dev)
>> __field(u32, hw_id)
>> - __field(u32, ring)
>> + __field(u16, class)
>> + __field(u16, instance)
>> __field(u32, ctx)
>> __field(u32, seqno)
>> __field(u32, global_seqno)
>> @@ -728,16 +742,17 @@ TRACE_EVENT(i915_request_out,
>> TP_fast_assign(
>> __entry->dev = rq->i915->drm.primary->index;
>> __entry->hw_id = rq->gem_context->hw_id;
>> - __entry->ring = rq->engine->id;
>> + __entry->class = rq->engine->uabi_class;
>> + __entry->instance = rq->engine->instance;
>> __entry->ctx = rq->fence.context;
>> __entry->seqno = rq->fence.seqno;
>> __entry->global_seqno = rq->global_seqno;
>> __entry->completed = i915_request_completed(rq);
>> ),
>> - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
>> - __entry->dev, __entry->hw_id, __entry->ring,
>> - __entry->ctx, __entry->seqno,
>> + TP_printk("dev=%u, hw_id=%u, engine=%u:%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
>> + __entry->dev, __entry->hw_id, __entry->class,
>> + __entry->instance, __entry->ctx, __entry->seqno,
>> __entry->global_seqno, __entry->completed)
>> );
>> @@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
>> TP_STRUCT__entry(
>> __field(u32, dev)
>> - __field(u32, ring)
>> + __field(u16, class)
>> + __field(u16, instance)
>> __field(u32, seqno)
>> __field(bool, waiters)
>> ),
>> TP_fast_assign(
>> __entry->dev = engine->i915->drm.primary->index;
>> - __entry->ring = engine->id;
>> + __entry->class = engine->uabi_class;
>> + __entry->instance = engine->instance;
>> __entry->seqno = intel_engine_get_seqno(engine);
>> __entry->waiters = waiters;
>> ),
>> - TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
>> - __entry->dev, __entry->ring, __entry->seqno,
>> - __entry->waiters)
>> + TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
>> + __entry->dev, __entry->class, __entry->instance,
>> + __entry->seqno, __entry->waiters)
>> );
>> DEFINE_EVENT(i915_request, i915_request_retire,
>> @@ -800,7 +817,8 @@ TRACE_EVENT(i915_request_wait_begin,
>> TP_STRUCT__entry(
>> __field(u32, dev)
>> __field(u32, hw_id)
>> - __field(u32, ring)
>> + __field(u16, class)
>> + __field(u16, instance)
>> __field(u32, ctx)
>> __field(u32, seqno)
>> __field(u32, global)
>> @@ -816,17 +834,19 @@ TRACE_EVENT(i915_request_wait_begin,
>> TP_fast_assign(
>> __entry->dev = rq->i915->drm.primary->index;
>> __entry->hw_id = rq->gem_context->hw_id;
>> - __entry->ring = rq->engine->id;
>> + __entry->class = rq->engine->uabi_class;
>> + __entry->instance = rq->engine->instance;
>> __entry->ctx = rq->fence.context;
>> __entry->seqno = rq->fence.seqno;
>> __entry->global = rq->global_seqno;
>> __entry->flags = flags;
>> ),
>> - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
>> - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
>> - __entry->seqno, __entry->global,
>> - !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
>> + TP_printk("dev=%u, hw_id=%u, engine=%u:%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
>> + __entry->dev, __entry->hw_id, __entry->class,
>> + __entry->instance, __entry->ctx, __entry->seqno,
>> + __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
>> + __entry->flags)
>> );
>> DEFINE_EVENT(i915_request, i915_request_wait_end,
>> @@ -966,21 +986,24 @@ TRACE_EVENT(switch_mm,
>> TP_ARGS(engine, to),
>> TP_STRUCT__entry(
>> - __field(u32, ring)
>> + __field(u16, class)
>> + __field(u16, instance)
>> __field(struct i915_gem_context *, to)
>> __field(struct i915_address_space *, vm)
>> __field(u32, dev)
>> ),
>> TP_fast_assign(
>> - __entry->ring = engine->id;
>> + __entry->class = engine->uabi_class;
>> + __entry->instance = engine->instance;
>> __entry->to = to;
>> __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
>> __entry->dev = engine->i915->drm.primary->index;
>> ),
>> - TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
>> - __entry->dev, __entry->ring, __entry->to, __entry->vm)
>> + TP_printk("dev=%u, engine=%u:%u, ctx=%p, ctx_vm=%p",
>> + __entry->dev, __entry->class, __entry->instance, __entry->to,
>> + __entry->vm)
>> );
>> #endif /* _I915_TRACE_H_ */
>
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx