[PATCH v2] drm/xe: Add DRM_XE_LOW_LEVEL_TRACEPOINTS

Lucas De Marchi lucas.demarchi at intel.com
Thu Jan 25 06:20:32 UTC 2024


On Wed, Jan 24, 2024 at 03:37:12PM -0800, Matthew Brost wrote:
>Hide all Xe tracepoints behind DRM_XE_LOW_LEVEL_TRACEPOINTS as these can
>be considered uABI. In Xe these are subject to change so hide these
>behind Kconfig option DRM_XE_LOW_LEVEL_TRACEPOINTS. In addition to
>possible uABI considerations these are low level developer tracepoints
>only needed in debug kernel builds.
>
>v2:
>- Reword commit message (Lucas)

Sorry, but I'm still not convinced. Looking at some past discussion
makes me even more sure we shouldn't add this config:

https://lore.kernel.org/all/20181217211417.1b9628fe@vmware.local.home/t/

I'd love for these tracepoints to be relevant enough that changing
them would be an actual problem, so that we'd be careful with updates.
They aren't. Hiding them like this will

a) prevent them from being used, and
b) not solve the problem at all, since setting CONFIG_DRM_XE_LOW_LEVEL_TRACEPOINTS
   doesn't change whether they are or are not uABI: once the option is
   enabled, the tracepoints are exposed to userspace just the same.

In i915 there was at least the worry that the tracepoints were known
to be breaking soon (I'm not sure the reason presented there actually
mattered in the end, as the updates in the header seem to have been due
to other reasons).

	git grep -l TRACE_HEADER_MULTI_READ | xargs git log --no-merges -p --

shows tracepoints changing all throughout the kernel without regard to
uABI. Are all of them wrong?
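
For reference, TRACE_HEADER_MULTI_READ is the guard that every
tracepoint header carries so that trace/define_trace.h can re-read the
header and expand the events into code, which is why grepping for it
enumerates the tracepoint headers across the tree. A minimal sketch of
that canonical layout (the foo names are just illustrative):

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM foo

	#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_FOO_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(foo_bar,
		    TP_PROTO(int val),
		    TP_ARGS(val),
		    TP_STRUCT__entry(__field(int, val)),
		    TP_fast_assign(__entry->val = val;),
		    TP_printk("val=%d", __entry->val)
	);

	#endif /* _TRACE_FOO_H */

	/* Must be outside the guard: this re-includes the header with
	 * TRACE_HEADER_MULTI_READ defined to generate the event code.
	 */
	#include <trace/define_trace.h>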

+Steven, who may provide more input if anything has changed since 2018
or if I'm completely wrong here.


Lucas De Marchi


>
>Cc: Lucas De Marchi <lucas.demarchi at intel.com>
>Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
>Signed-off-by: Matthew Brost <matthew.brost at intel.com>
>---
> drivers/gpu/drm/xe/Kconfig.debug |  10 +
> drivers/gpu/drm/xe/Makefile      |   4 +-
> drivers/gpu/drm/xe/xe_trace.h    | 339 +++++++++++++++++++++++++++++++
> 3 files changed, 352 insertions(+), 1 deletion(-)
>
>diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
>index 549065f57a78..a8785e71e224 100644
>--- a/drivers/gpu/drm/xe/Kconfig.debug
>+++ b/drivers/gpu/drm/xe/Kconfig.debug
>@@ -105,3 +105,13 @@ config DRM_XE_USERPTR_INVAL_INJECT
>
> 	 Recomended for driver developers only.
> 	 If in doubt, say "N".
>+
>+config DRM_XE_LOW_LEVEL_TRACEPOINTS
>+       bool "Enable low level trace events"
>+       default n
>+       help
>+         Choose this option to enable low level trace events.
>+
>+	 Recommended for driver developers only.
>+
>+	 If in doubt, say "N".
>diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
>index abb2be8268d0..9e591571b2ff 100644
>--- a/drivers/gpu/drm/xe/Makefile
>+++ b/drivers/gpu/drm/xe/Makefile
>@@ -130,7 +130,6 @@ xe-y += xe_bb.o \
> 	xe_sync.o \
> 	xe_tile.o \
> 	xe_tile_sysfs.o \
>-	xe_trace.o \
> 	xe_ttm_sys_mgr.o \
> 	xe_ttm_stolen_mgr.o \
> 	xe_ttm_vram_mgr.o \
>@@ -144,6 +143,9 @@ xe-y += xe_bb.o \
> 	xe_wa.o \
> 	xe_wopcm.o
>
>+# trace support
>+xe-$(CONFIG_DRM_XE_LOW_LEVEL_TRACEPOINTS) += xe_trace.o
>+
> # graphics hardware monitoring (HWMON) support
> xe-$(CONFIG_HWMON) += xe_hwmon.o
>
>diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
>index e4e7262191ad..831925a3204b 100644
>--- a/drivers/gpu/drm/xe/xe_trace.h
>+++ b/drivers/gpu/drm/xe/xe_trace.h
>@@ -21,6 +21,7 @@
> #include "xe_sched_job.h"
> #include "xe_vm.h"
>
>+#if defined(CONFIG_DRM_XE_LOW_LEVEL_TRACEPOINTS)
> DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
> 		    TP_PROTO(struct xe_gt_tlb_invalidation_fence *fence),
> 		    TP_ARGS(fence),
>@@ -597,6 +598,344 @@ DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
> 			     __entry->tail, __entry->_head)
>
> );
>+#else
>+#if !defined(TRACE_HEADER_MULTI_READ)
>+static inline void
>+trace_xe_gt_tlb_invalidation_fence_create(struct xe_gt_tlb_invalidation_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_gt_tlb_invalidation_fence_work_func(struct xe_gt_tlb_invalidation_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_gt_tlb_invalidation_fence_cb(struct xe_gt_tlb_invalidation_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_gt_tlb_invalidation_fence_send(struct xe_gt_tlb_invalidation_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_gt_tlb_invalidation_fence_recv(struct xe_gt_tlb_invalidation_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_gt_tlb_invalidation_fence_timeout(struct xe_gt_tlb_invalidation_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_bo_cpu_fault(struct xe_bo *bo)
>+{
>+}
>+
>+static inline void
>+trace_xe_bo_move(struct xe_bo *bo)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_create(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_supress_resume(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_submit(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_scheduling_enable(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_scheduling_disable(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_scheduling_done(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_register(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_deregister(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_deregister_done(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_close(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_kill(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_cleanup_entity(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_destroy(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_reset(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_memory_cat_error(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_stop(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_resubmit(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_exec_queue_lr_cleanup(struct xe_exec_queue *q)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_job_create(struct xe_sched_job *job)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_job_exec(struct xe_sched_job *job)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_job_run(struct xe_sched_job *job)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_job_free(struct xe_sched_job *job)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_job_timedout(struct xe_sched_job *job)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_job_set_error(struct xe_sched_job *job)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_job_ban(struct xe_sched_job *job)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_msg_add(struct xe_sched_msg *msg)
>+{
>+}
>+
>+static inline void
>+trace_xe_sched_msg_recv(struct xe_sched_msg *msg)
>+{
>+}
>+
>+static inline void
>+trace_xe_hw_fence_create(struct xe_hw_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_hw_fence_signal(struct xe_hw_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_hw_fence_try_signal(struct xe_hw_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_hw_fence_free(struct xe_hw_fence *fence)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_flush(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_pagefault(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_acc(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_fail(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_bind(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_pf_bind(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_unbind(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_userptr_rebind_worker(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_userptr_rebind_exec(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_rebind_worker(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_rebind_exec(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_userptr_invalidate(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_usm_invalidate(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_evict(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vma_userptr_invalidate_complete(struct xe_vma *vma)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_kill(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_create(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_free(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_cpu_bind(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_restart(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_rebind_worker_enter(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_rebind_worker_retry(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_vm_rebind_worker_exit(struct xe_vm *vm)
>+{
>+}
>+
>+static inline void
>+trace_xe_guc_ct_h2g_flow_control(u32 _head, u32 _tail, u32 size, u32 space, u32 len)
>+{
>+}
>+
>+static inline void
>+trace_xe_guc_ct_g2h_flow_control(u32 _head, u32 _tail, u32 size, u32 space, u32 len)
>+{
>+}
>+
>+static inline void
>+trace_xe_guc_ctb_h2g(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail)
>+{
>+}
>+
>+static inline void
>+trace_xe_guc_ctb_g2h(u8 gt_id, u32 action, u32 len, u32 _head, u32 tail)
>+{
>+}
>+#endif
>+#endif
>
> #endif
>
>-- 
>2.34.1
>

