[PATCH v3 8/8] drm/amdgpu: add devname to trace_amdgpu_sched_run_job
Pierre-Eric Pelloux-Prayer
pierre-eric.pelloux-prayer at amd.com
Fri Feb 16 15:09:57 UTC 2024
Since the drm scheduler moved to work queues it has become impossible
for a tool to match the events to the GPU that emitted them.

Before this move, the event source was a fixed kernel thread
(e.g. gfx_0.0.0-598), so even if the system had multiple GPUs with
identical queue names it was possible to map the events to a GPU
using the PID.

With work queues, the source is now something like "kworker/u64:0-15248"
and the PID isn't stable, so the "timeline=gfx_0.0.0" attribute isn't
enough to identify the GPU in multi-GPU setups.

This commit adds a dev=devname attribute to the event to resolve this
issue.
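
For illustration only (the PCI address, PID and counters below are made-up
values, and the field layout follows the TP_printk format string in the
diff), an event read from /sys/kernel/tracing/trace_pipe would then look
roughly like:

  kworker/u64:0-15248 [003] .... 1234.567890: amdgpu_sched_run_job: sched_job=4823, timeline=gfx_0.0.0, context=177, seqno=12, ring_name=gfx_0.0.0, num_ibs=1, dev=0000:03:00.0

A tool can key its per-GPU timelines on the dev= value instead of the
(now unstable) worker PID.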
Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 12 ++++++++----
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 71a5cf37b472..657866a498f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -292,7 +292,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
job = to_amdgpu_job(sched_job);
finished = &job->base.s_fence->finished;
- trace_amdgpu_sched_run_job(job);
+ trace_amdgpu_sched_run_job(job, adev);
/* Skip job if VRAM is lost and never resubmit gangs */
if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 3f18f570e5ac..1aea1b78747d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -202,8 +202,8 @@ TRACE_EVENT(amdgpu_cs_start,
);
TRACE_EVENT(amdgpu_sched_run_job,
- TP_PROTO(struct amdgpu_job *job),
- TP_ARGS(job),
+ TP_PROTO(struct amdgpu_job *job, struct amdgpu_device *adev),
+ TP_ARGS(job, adev),
TP_STRUCT__entry(
__field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
@@ -211,6 +211,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__field(unsigned int, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
+ __string(dname, dev_name(adev->dev))
),
TP_fast_assign(
@@ -220,10 +221,13 @@ TRACE_EVENT(amdgpu_sched_run_job,
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring, to_amdgpu_ring(job->base.sched)->name);
__entry->num_ibs = job->num_ibs;
+ __assign_str(dname, dev_name(adev->dev));
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+ TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, "
+ "ring_name=%s, num_ibs=%u, dev=%s",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __get_str(ring), __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs, __get_str(dname))
+
);
--
2.40.1