[CI v3 19/26] drm/xe: Update VM trace events
Oak Zeng
oak.zeng at intel.com
Thu May 30 00:47:25 UTC 2024
From: Matthew Brost <matthew.brost at intel.com>
The trace events have changed with the move to a single job per VM bind
IOCTL; update the trace events to align with the old behavior as much as
possible.
Cc: Oak Zeng <oak.zeng at intel.com>
Cc: Thomas Hellström <thomas.hellstrom at linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/xe/xe_trace.h | 10 ++++-----
drivers/gpu/drm/xe/xe_vm.c | 42 +++++++++++++++++++++++++++++++++--
2 files changed, 45 insertions(+), 7 deletions(-)
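
Note (not part of the patch): the new xe_vm_ops_fail event below is a
two-line reuse of the existing xe_vm event class in xe_trace.h, so it needs
no TP_STRUCT__entry/TP_printk of its own. A rough sketch of that pattern
follows; the class body is paraphrased and assumes the xe_vm class records
the raw VM pointer and its ASID (vm->usm.asid), which may differ in detail
from the tree this applies to:

    /*
     * Sketch of the event-class pattern used in xe_trace.h. The exact
     * fields of the xe_vm class (vm pointer, ASID) are assumed here.
     */
    DECLARE_EVENT_CLASS(xe_vm,
                        TP_PROTO(struct xe_vm *vm),
                        TP_ARGS(vm),

                        TP_STRUCT__entry(__field(struct xe_vm *, vm)
                                         __field(u32, asid)),

                        TP_fast_assign(__entry->vm = vm;
                                       __entry->asid = vm->usm.asid;),

                        TP_printk("vm=%p, asid=0x%05x",
                                  __entry->vm, __entry->asid)
    );

    /* Each event is then a two-line definition against the class: */
    DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
                 TP_PROTO(struct xe_vm *vm),
                 TP_ARGS(vm)
    );

Each DEFINE_EVENT against the class makes a trace_<event>() helper
available, which is how xe_vm.c can call trace_xe_vm_ops_fail(vm) in the
error path below.
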
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 450f407c66e8..0f5b9df889b9 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -426,11 +426,6 @@ DEFINE_EVENT(xe_vma, xe_vma_acc,
              TP_ARGS(vma)
 );
 
-DEFINE_EVENT(xe_vma, xe_vma_fail,
-             TP_PROTO(struct xe_vma *vma),
-             TP_ARGS(vma)
-);
-
 DEFINE_EVENT(xe_vma, xe_vma_bind,
              TP_PROTO(struct xe_vma *vma),
              TP_ARGS(vma)
@@ -544,6 +539,11 @@ DEFINE_EVENT(xe_vm, xe_vm_rebind_worker_exit,
              TP_ARGS(vm)
 );
 
+DEFINE_EVENT(xe_vm, xe_vm_ops_fail,
+             TP_PROTO(struct xe_vm *vm),
+             TP_ARGS(vm)
+);
+
 /* GuC */
 DECLARE_EVENT_CLASS(xe_guc_ct_flow_control,
                     TP_PROTO(u32 _head, u32 _tail, u32 size, u32 space, u32 len),
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 551048bff9ce..a205a72f411b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2476,6 +2476,38 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
         return 0;
 }
 
+static void op_trace(struct xe_vma_op *op)
+{
+        switch (op->base.op) {
+        case DRM_GPUVA_OP_MAP:
+                trace_xe_vma_bind(op->map.vma);
+                break;
+        case DRM_GPUVA_OP_REMAP:
+                trace_xe_vma_unbind(gpuva_to_vma(op->base.remap.unmap->va));
+                if (op->remap.prev)
+                        trace_xe_vma_bind(op->remap.prev);
+                if (op->remap.next)
+                        trace_xe_vma_bind(op->remap.next);
+                break;
+        case DRM_GPUVA_OP_UNMAP:
+                trace_xe_vma_unbind(gpuva_to_vma(op->base.unmap.va));
+                break;
+        case DRM_GPUVA_OP_PREFETCH:
+                trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
+                break;
+        default:
+                XE_WARN_ON("NOT POSSIBLE");
+        }
+}
+
+static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops)
+{
+        struct xe_vma_op *op;
+
+        list_for_each_entry(op, &vops->list, link)
+                op_trace(op);
+}
+
 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
 {
         struct xe_exec_queue *q = vops->q;
@@ -2519,8 +2551,10 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
         if (number_tiles > 1) {
                 fences = kmalloc_array(number_tiles, sizeof(*fences),
                                        GFP_KERNEL);
-                if (!fences)
-                        return ERR_PTR(-ENOMEM);
+                if (!fences) {
+                        fence = ERR_PTR(-ENOMEM);
+                        goto err_trace;
+                }
         }
 
         for_each_tile(tile, vm->xe, id) {
@@ -2534,6 +2568,8 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
                 }
         }
 
+        trace_xe_vm_ops_execute(vops);
+
         for_each_tile(tile, vm->xe, id) {
                 if (!vops->pt_update_ops[id].num_ops)
                         continue;
@@ -2580,6 +2616,8 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 
         kfree(fences);
         kfree(cf);
 
+err_trace:
+        trace_xe_vm_ops_fail(vm);
         return fence;
 }
--
2.26.3