[PATCH 3/3] drm: fix string-style warning in scheduler trace
Huang Rui
ray.huang at amd.com
Mon Dec 13 06:34:22 UTC 2021
Use the __string(), __assign_str() and __get_str() helpers in the TRACE_EVENT()
macros instead of a plain string pointer field in the gpu scheduler trace events.
With only the pointer stored, trace_check_vprintf() cannot verify that the "%s"
argument still points at valid memory when the ring buffer is read, and it emits
the warning below:
[ 158.890890] ------------[ cut here ]------------
[ 158.890899] fmt: 'entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d
' current_buffer: ' Xorg-1588 [001] ..... 149.391136: drm_sched_job: entity=0000000076f0d517, id=1, fence=000000008dd56028, ring='
[ 158.890910] WARNING: CPU: 6 PID: 1617 at kernel/trace/trace.c:3830 trace_check_vprintf+0x481/0x4a0
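For reference, __string() reserves space for a copy of the string in the ring
buffer record, __assign_str() fills that copy at trace time, and __get_str()
reads the copy back when the event is printed, so the output path no longer has
to dereference an external pointer. A trimmed-down sketch of an event using the
helpers (the event and parameter names here are made up, and the usual
TRACE_SYSTEM/include boilerplate is omitted):

TRACE_EVENT(example_sched_event,
	    TP_PROTO(struct drm_gpu_scheduler *sched),
	    TP_ARGS(sched),
	    TP_STRUCT__entry(
			     /* reserve a variable-length slot for the name */
			     __string(name, sched->name)
			     ),
	    TP_fast_assign(
			   /* copy the string into the reserved slot */
			   __assign_str(name, sched->name);
			   ),
	    /* print from the copy in the record, not the original pointer */
	    TP_printk("ring=%s", __get_str(name))
);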
Signed-off-by: Huang Rui <ray.huang at amd.com>
---
drivers/gpu/drm/scheduler/gpu_scheduler_trace.h | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 877ce9b127f1..4e397790c195 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -38,6 +38,6 @@ TRACE_EVENT(drm_sched_job,
TP_STRUCT__entry(
__field(struct drm_sched_entity *, entity)
__field(struct dma_fence *, fence)
- __field(const char *, name)
+ __string(name, sched_job->sched->name)
__field(uint64_t, id)
__field(u32, job_count)
@@ -48,14 +48,14 @@ TRACE_EVENT(drm_sched_job,
__entry->entity = entity;
__entry->id = sched_job->id;
__entry->fence = &sched_job->s_fence->finished;
- __entry->name = sched_job->sched->name;
+ __assign_str(name, sched_job->sched->name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
__entry->entity, __entry->id,
- __entry->fence, __entry->name,
+ __entry->fence, __get_str(name),
__entry->job_count, __entry->hw_job_count)
);
@@ -65,7 +65,7 @@ TRACE_EVENT(drm_run_job,
TP_STRUCT__entry(
__field(struct drm_sched_entity *, entity)
__field(struct dma_fence *, fence)
- __field(const char *, name)
+ __string(name, sched_job->sched->name)
__field(uint64_t, id)
__field(u32, job_count)
__field(int, hw_job_count)
@@ -75,14 +75,14 @@ TRACE_EVENT(drm_run_job,
__entry->entity = entity;
__entry->id = sched_job->id;
__entry->fence = &sched_job->s_fence->finished;
- __entry->name = sched_job->sched->name;
+ __assign_str(name, sched_job->sched->name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
__entry->entity, __entry->id,
- __entry->fence, __entry->name,
+ __entry->fence, __get_str(name),
__entry->job_count, __entry->hw_job_count)
);
@@ -103,7 +103,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __field(const char *,name)
+ __string(name, sched_job->sched->name)
__field(uint64_t, id)
__field(struct dma_fence *, fence)
__field(uint64_t, ctx)
@@ -111,14 +111,14 @@ TRACE_EVENT(drm_sched_job_wait_dep,
),
TP_fast_assign(
- __entry->name = sched_job->sched->name;
+ __assign_str(name, sched_job->sched->name);
__entry->id = sched_job->id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
- __entry->name, __entry->id,
+ __get_str(name), __entry->id,
__entry->fence, __entry->ctx,
__entry->seqno)
);
--
2.25.1