[Intel-gfx] [PATCH] drm/i915: Pair seqno completion tracepoint with its dispatch
Chris Wilson
chris at chris-wilson.co.uk
Sat Aug 17 23:36:48 CEST 2013
In order to time how long a seqno takes to execute on a ring, we need to
measure both its insertion and its completion. (Using the completion of
the previous seqno as an estimate for when the GPU starts, if busy.) In
order to get an exact completion timestamp, we need irqs; these are
enabled by trace_i915_gem_ring_dispatch, so it makes more sense to pair
the completion event with that tracepoint rather than with the request.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
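For anyone wanting to play with the pairing from userspace, here is a rough
sketch (illustration only, not part of the patch) that reads the default
ftrace text output and reports the dispatch-to-completion interval per ring.
It assumes the TP_printk formats below, a debugfs mount at /sys/kernel/debug,
and for simplicity only remembers the most recent dispatch on each ring.

/*
 * Rough sketch only: pair i915_gem_ring_dispatch with the next
 * i915_gem_ring_complete on the same ring and report how long the
 * batch was outstanding.  Assumes the default ftrace text output,
 * the TP_printk formats from the patch below, and a debugfs mount
 * at /sys/kernel/debug; only the most recent dispatch per ring is
 * tracked, which is enough for illustration but not for real use.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define NUM_RINGS 8	/* generous upper bound for ring->id */

/* Pull out the "  1234.567890:" timestamp that precedes the event name. */
static int parse_ts(const char *line, const char *event, double *ts)
{
	const char *p;

	if (event - line < 3)
		return 0;

	p = event - 2;	/* step back over the ": " separator */
	while (p > line && (isdigit((unsigned char)p[-1]) || p[-1] == '.'))
		p--;
	return sscanf(p, "%lf", ts) == 1;
}

int main(void)
{
	double dispatch_ts[NUM_RINGS] = { 0 };
	unsigned int dispatch_seqno[NUM_RINGS] = { 0 };
	char line[512];
	FILE *f = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");

	if (!f) {
		perror("trace_pipe");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		unsigned int dev, ring, seqno, flags;
		double ts;
		char *ev;

		if ((ev = strstr(line, "i915_gem_ring_dispatch:")) != NULL) {
			/* Remember when this seqno was handed to the ring. */
			if (sscanf(ev, "i915_gem_ring_dispatch: dev=%u, ring=%u, seqno=%u, flags=%x",
				   &dev, &ring, &seqno, &flags) == 4 &&
			    ring < NUM_RINGS && parse_ts(line, ev, &ts)) {
				dispatch_ts[ring] = ts;
				dispatch_seqno[ring] = seqno;
			}
		} else if ((ev = strstr(line, "i915_gem_ring_complete:")) != NULL) {
			/* Report the interval when the same seqno completes. */
			if (sscanf(ev, "i915_gem_ring_complete: dev=%u, ring=%u, seqno=%u",
				   &dev, &ring, &seqno) == 3 &&
			    ring < NUM_RINGS && parse_ts(line, ev, &ts) &&
			    seqno == dispatch_seqno[ring])
				printf("ring %u seqno %u: %.3f ms dispatch->complete\n",
				       ring, seqno, (ts - dispatch_ts[ring]) * 1e3);
		}
	}

	fclose(f);
	return 0;
}

Enable the two events under the i915 group in the tracing events directory
before running it; anything more serious would want to keep a queue of
outstanding seqnos per ring rather than just the last one.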
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +-
drivers/gpu/drm/i915/i915_irq.c | 2 +-
drivers/gpu/drm/i915/i915_trace.h | 48 +++++++++++++++---------------
3 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 88941ba..f5ad477 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1144,7 +1144,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+	trace_i915_gem_ring_dispatch(ring, flags);
 
 	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d74fcf0..2def3c7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -701,7 +701,7 @@ static void notify_ring(struct drm_device *dev,
 	if (ring->obj == NULL)
 		return;
 
-	trace_i915_gem_request_complete(ring);
+	trace_i915_gem_ring_complete(ring);
 
 	wake_up_all(&ring->irq_queue);
 	i915_queue_hangcheck(dev);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e3971c9..feaacbb 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -236,8 +236,8 @@ TRACE_EVENT(i915_gem_evict_everything,
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
-	    TP_ARGS(ring, seqno, flags),
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 flags),
+	    TP_ARGS(ring, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -249,15 +249,35 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 	    TP_fast_assign(
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
-			   __entry->seqno = seqno;
+			   __entry->seqno = intel_ring_get_seqno(ring);
 			   __entry->flags = flags;
-			   i915_trace_irq_get(ring, seqno);
+			   i915_trace_irq_get(ring, __entry->seqno);
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
 		      __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
 );
 
+TRACE_EVENT(i915_gem_ring_complete,
+	    TP_PROTO(struct intel_ring_buffer *ring),
+	    TP_ARGS(ring),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, ring)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = ring->dev->primary->index;
+			   __entry->ring = ring->id;
+			   __entry->seqno = ring->get_seqno(ring, false);
+			   ),
+
+	    TP_printk("dev=%u, ring=%u, seqno=%u",
+		      __entry->dev, __entry->ring, __entry->seqno)
+);
+
 TRACE_EVENT(i915_gem_ring_flush,
 	    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
 	    TP_ARGS(ring, invalidate, flush),
@@ -306,26 +326,6 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
 	    TP_ARGS(ring, seqno)
 );
 
-TRACE_EVENT(i915_gem_request_complete,
-	    TP_PROTO(struct intel_ring_buffer *ring),
-	    TP_ARGS(ring),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(u32, ring)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = ring->dev->primary->index;
-			   __entry->ring = ring->id;
-			   __entry->seqno = ring->get_seqno(ring, false);
-			   ),
-
-	    TP_printk("dev=%u, ring=%u, seqno=%u",
-		      __entry->dev, __entry->ring, __entry->seqno)
-);
-
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
 	    TP_ARGS(ring, seqno)
--
1.8.4.rc2