[PATCH v8 04/10] drm/sched: cleanup gpu_scheduler trace events
Tvrtko Ursulin
tursulin at ursulin.net
Thu Mar 20 10:46:20 UTC 2025
On 20/03/2025 09:58, Pierre-Eric Pelloux-Prayer wrote:
> A fence uniquely identifies a job, so this commit updates the places
> where a kernel pointer was used as an identifier to instead use:
>
> "fence=%llu:%llu"
>
> Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer at amd.com>
> ---
> .../gpu/drm/scheduler/gpu_scheduler_trace.h | 45 ++++++++++---------
> 1 file changed, 24 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> index 713df3516a17..21a85ee59066 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
> @@ -36,28 +36,29 @@ DECLARE_EVENT_CLASS(drm_sched_job,
> TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
> TP_ARGS(sched_job, entity),
> TP_STRUCT__entry(
> - __field(struct drm_sched_entity *, entity)
> - __field(struct dma_fence *, fence)
> - __string(name, sched_job->sched->name)
> __field(uint64_t, id)
> + __string(name, sched_job->sched->name)
> __field(u32, job_count)
> __field(int, hw_job_count)
> __string(dev, dev_name(sched_job->sched->dev))
> + __field(u64, fence_context)
> + __field(u64, fence_seqno)
> ),
>
> TP_fast_assign(
> - __entry->entity = entity;
> __entry->id = sched_job->id;
> - __entry->fence = &sched_job->s_fence->finished;
> __assign_str(name);
> __entry->job_count = spsc_queue_count(&entity->job_queue);
> __entry->hw_job_count = atomic_read(
> &sched_job->sched->credit_count);
> __assign_str(dev);
> + __entry->fence_context = sched_job->s_fence->finished.context;
> + __entry->fence_seqno = sched_job->s_fence->finished.seqno;
> +
You add this blank line only to remove it again in the following patch.
Otherwise LGTM.
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at igalia.com>
Regards,
Tvrtko
> ),
> - TP_printk("dev=%s, entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
> - __get_str(dev), __entry->entity, __entry->id,
> - __entry->fence, __get_str(name),
> + TP_printk("dev=%s, id=%llu, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d",
> + __get_str(dev), __entry->id,
> + __entry->fence_context, __entry->fence_seqno, __get_str(name),
> __entry->job_count, __entry->hw_job_count)
> );
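As an aside for anyone reading along: with this change the output of the
events based on this class goes from a raw pointer to a stable identifier.
With made-up values it would look roughly like:

    Before: dev=0000:03:00.0, entity=ffff8881091a2c00, id=42, fence=ffff888103d5bd80, ring=gfx, job count:1, hw job count:2
    After:  dev=0000:03:00.0, id=42, fence=1007:3, ring=gfx, job count:1, hw job count:2

A pointer can be recycled once the job is freed, whereas the context:seqno
pair stays unique for the lifetime of the trace.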
>
> @@ -75,37 +76,39 @@ TRACE_EVENT(drm_sched_process_job,
> TP_PROTO(struct drm_sched_fence *fence),
> TP_ARGS(fence),
> TP_STRUCT__entry(
> - __field(struct dma_fence *, fence)
> + __field(u64, fence_context)
> + __field(u64, fence_seqno)
> ),
>
> TP_fast_assign(
> - __entry->fence = &fence->finished;
> + __entry->fence_context = fence->finished.context;
> + __entry->fence_seqno = fence->finished.seqno;
> ),
> - TP_printk("fence=%p signaled", __entry->fence)
> + TP_printk("fence=%llu:%llu signaled",
> + __entry->fence_context, __entry->fence_seqno)
> );
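The context:seqno pair is unique by construction in the dma-fence API:
contexts come from a global counter and seqnos increase within a context.
A minimal sketch of how a driver obtains them (standard dma-fence calls,
not part of this patch; the variable names are illustrative):

    /* Allocate one fence context; returns a u64 that is unique
     * system-wide for the lifetime of the system. */
    u64 ctx = dma_fence_context_alloc(1);

    /* Each fence on that context gets a monotonically increasing
     * seqno, so (context, seqno) identifies exactly one fence. */
    dma_fence_init(&my_fence, &my_fence_ops, &my_lock, ctx, next_seqno++);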
>
> TRACE_EVENT(drm_sched_job_wait_dep,
> TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
> TP_ARGS(sched_job, fence),
> TP_STRUCT__entry(
> - __string(name, sched_job->sched->name)
> + __field(u64, fence_context)
> + __field(u64, fence_seqno)
> __field(uint64_t, id)
> - __field(struct dma_fence *, fence)
> - __field(uint64_t, ctx)
> - __field(unsigned, seqno)
> + __field(u64, ctx)
> + __field(u64, seqno)
> ),
>
> TP_fast_assign(
> - __assign_str(name);
> + __entry->fence_context = sched_job->s_fence->finished.context;
> + __entry->fence_seqno = sched_job->s_fence->finished.seqno;
> __entry->id = sched_job->id;
> - __entry->fence = fence;
> __entry->ctx = fence->context;
> __entry->seqno = fence->seqno;
> ),
> - TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
> - __get_str(name), __entry->id,
> - __entry->fence, __entry->ctx,
> - __entry->seqno)
> + TP_printk("fence=%llu:%llu, id=%llu depends on unsignalled fence=%llu:%llu",
> + __entry->fence_context, __entry->fence_seqno, __entry->id,
> + __entry->ctx, __entry->seqno)
> );
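One nice side effect worth noting: because drm_sched_job_wait_dep now
prints the dependency as context:seqno too, it can be matched textually
against the fence=%llu:%llu printed by the other events. With made-up
values:

    drm_sched_job_wait_dep: fence=1007:4, id=43 depends on unsignalled fence=1005:9
    drm_sched_process_job:  fence=1005:9 signaled

which was not possible when one side printed an opaque pointer value.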
>
> #endif /* _GPU_SCHED_TRACE_H_ */