[PATCH] drm/xe: Add DRM_XE_LOW_LEVEL_TRACEPOINTS

Matthew Brost <matthew.brost@intel.com>
Fri Jan 5 16:45:30 UTC 2024


Trace events are uABI. The Xe trace events currently implemented are
developer only and subject to change. Hide all Xe trace events behind
DRM_XE_LOW_LEVEL_TRACEPOINTS so they remain free to change going
forward.
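
With the option disabled, xe_trace.h provides an empty static inline
stub for each trace_xe_*() call, so existing call sites build
unchanged and the calls compile away entirely; no #ifdef is needed at
any call site. A minimal sketch of the effect at a hypothetical call
site (illustrative only, not part of this patch):

	void example_move(struct xe_bo *bo)
	{
		/*
		 * Resolves to the empty static inline stub in
		 * xe_trace.h when CONFIG_DRM_XE_LOW_LEVEL_TRACEPOINTS=n
		 * and is elided by the compiler; with the option
		 * enabled it emits the real trace event.
		 */
		trace_xe_bo_move(bo);
	}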

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/Kconfig.debug |  10 +
 drivers/gpu/drm/xe/Makefile      |   4 +-
 drivers/gpu/drm/xe/xe_trace.h    | 339 +++++++++++++++++++++++++++++++
 3 files changed, 352 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 549065f57a78..a8785e71e224 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -105,3 +105,13 @@ config DRM_XE_USERPTR_INVAL_INJECT
 
 	 Recomended for driver developers only.
 	 If in doubt, say "N".
+
+config DRM_XE_LOW_LEVEL_TRACEPOINTS
+	bool "Enable low-level trace events"
+	default n
+	help
+	 Choose this option to enable low-level trace events.
+
+	 Recommended for driver developers only.
+
+	 If in doubt, say "N".
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 53bd2a8ba1ae..9d33e6975ba0 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -129,7 +129,6 @@ xe-y += xe_bb.o \
 	xe_sync.o \
 	xe_tile.o \
 	xe_tile_sysfs.o \
-	xe_trace.o \
 	xe_ttm_sys_mgr.o \
 	xe_ttm_stolen_mgr.o \
 	xe_ttm_vram_mgr.o \
@@ -142,6 +141,9 @@ xe-y += xe_bb.o \
 	xe_wa.o \
 	xe_wopcm.o
 
+# trace support
+xe-$(CONFIG_DRM_XE_LOW_LEVEL_TRACEPOINTS) += xe_trace.o
+
 # graphics hardware monitoring (HWMON) support
 xe-$(CONFIG_HWMON) += xe_hwmon.o
 
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 95163c303f3e..7163ac9b8eae 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -21,6 +21,7 @@
 #include "xe_sched_job.h"
 #include "xe_vm.h"
 
+#if defined(CONFIG_DRM_XE_LOW_LEVEL_TRACEPOINTS)
 DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
 		    TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
 		    TP_ARGS(fence),
@@ -597,6 +598,344 @@ DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
 			     __entry->tail, __entry->_head)
 
 );
+#else
+#if !defined(TRACE_HEADER_MULTI_READ)
+static inline void
+trace_xe_gt_tlb_invalidation_fence_create(struct xe_gt_tlb_invalidation_fence *fence)
+{
+}
+
+static inline void
+trace_xe_gt_tlb_invalidation_fence_work_func(struct xe_gt_tlb_invalidation_fence *fence)
+{
+}
+
+static inline void
+trace_xe_gt_tlb_invalidation_fence_cb(struct xe_gt_tlb_invalidation_fence *fence)
+{
+}
+
+static inline void
+trace_xe_gt_tlb_invalidation_fence_send(struct xe_gt_tlb_invalidation_fence *fence)
+{
+}
+
+static inline void
+trace_xe_gt_tlb_invalidation_fence_recv(struct xe_gt_tlb_invalidation_fence *fence)
+{
+}
+
+static inline void
+trace_xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
+{
+}
+
+static inline void
+trace_xe_gt_tlb_invalidation_fence_timeout(struct xe_gt_tlb_invalidation_fence *fence)
+{
+}
+
+static inline void
+trace_xe_bo_cpu_fault(struct xe_bo *bo)
+{
+}
+
+static inline void
+trace_xe_bo_move(struct xe_bo *bo)
+{
+}
+
+static inline void
+trace_xe_exec_queue_create(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_supress_resume(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_submit(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_scheduling_enable(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_scheduling_disable(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_scheduling_done(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_register(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_deregister(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_deregister_done(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_close(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_kill(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_cleanup_entity(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_destroy(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_reset(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_memory_cat_error(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_stop(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_resubmit(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_exec_queue_lr_cleanup(struct xe_exec_queue *q)
+{
+}
+
+static inline void
+trace_xe_sched_job_create(struct xe_sched_job *job)
+{
+}
+
+static inline void
+trace_xe_sched_job_exec(struct xe_sched_job *job)
+{
+}
+
+static inline void
+trace_xe_sched_job_run(struct xe_sched_job *job)
+{
+}
+
+static inline void
+trace_xe_sched_job_free(struct xe_sched_job *job)
+{
+}
+
+static inline void
+trace_xe_sched_job_timedout(struct xe_sched_job *job)
+{
+}
+
+static inline void
+trace_xe_sched_job_set_error(struct xe_sched_job *job)
+{
+}
+
+static inline void
+trace_xe_sched_job_ban(struct xe_sched_job *job)
+{
+}
+
+static inline void
+trace_xe_sched_msg_add(struct xe_sched_msg *msg)
+{
+}
+
+static inline void
+trace_xe_sched_msg_recv(struct xe_sched_msg *msg)
+{
+}
+
+static inline void
+trace_xe_hw_fence_create(struct xe_hw_fence *fence)
+{
+}
+
+static inline void
+trace_xe_hw_fence_signal(struct xe_hw_fence *fence)
+{
+}
+
+static inline void
+trace_xe_hw_fence_try_signal(struct xe_hw_fence *fence)
+{
+}
+
+static inline void
+trace_xe_hw_fence_free(struct xe_hw_fence *fence)
+{
+}
+
+static inline void
+trace_xe_vma_flush(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_pagefault(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_acc(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_fail(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_bind(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_pf_bind(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_unbind(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_userptr_rebind_worker(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_userptr_rebind_exec(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_rebind_worker(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_rebind_exec(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_userptr_invalidate(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_usm_invalidate(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_evict(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vma_userptr_invalidate_complete(struct xe_vma *vma)
+{
+}
+
+static inline void
+trace_xe_vm_kill(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_vm_create(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_vm_free(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_vm_cpu_bind(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_vm_restart(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_vm_rebind_worker_enter(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_vm_rebind_worker_retry(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_vm_rebind_worker_exit(struct xe_vm *vm)
+{
+}
+
+static inline void
+trace_xe_guc_ct_h2g_flow_control(u32 _head, u32 _tail, u32 size, u32 space, u32 len)
+{
+}
+
+static inline void
+trace_xe_guc_ct_g2h_flow_control(u32 _head, u32 _tail, u32 size, u32 space, u32 len)
+{
+}
+
+static inline void
+trace_xe_guc_ctb_h2g(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail)
+{
+}
+
+static inline void
+trace_xe_guc_ctb_g2h(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail)
+{
+}
+#endif /* !TRACE_HEADER_MULTI_READ */
+#endif /* CONFIG_DRM_XE_LOW_LEVEL_TRACEPOINTS */
 
 #endif
 
-- 
2.34.1
