[PATCH 29/29] drm/i915: Encode extra tags into request.global_seqno
Chris Wilson
chris at chris-wilson.co.uk
Wed Oct 10 20:13:20 UTC 2018
To support a virtual engine, on which a preemption event may cause a
request to be transferred from one engine to another, we need to encode
the engine that we are waiting on into the seqno, or else we risk
evaluating (and signalling) a request as completed after it has been
transferred to another engine.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_debugfs.c | 6 ++-
drivers/gpu/drm/i915/i915_drv.h | 2 +-
drivers/gpu/drm/i915/i915_gpu_error.c | 2 +-
drivers/gpu/drm/i915/i915_irq.c | 5 ++-
drivers/gpu/drm/i915/i915_request.c | 22 +++++------
drivers/gpu/drm/i915/i915_request.h | 14 ++++---
drivers/gpu/drm/i915/i915_reset.c | 2 +-
drivers/gpu/drm/i915/intel_breadcrumbs.c | 39 ++++++++++---------
drivers/gpu/drm/i915/intel_engine_cs.c | 8 ++--
drivers/gpu/drm/i915/intel_lrc.c | 9 +++--
drivers/gpu/drm/i915/intel_ringbuffer.c | 5 ++-
drivers/gpu/drm/i915/intel_ringbuffer.h | 14 +++----
drivers/gpu/drm/i915/selftests/i915_request.c | 3 +-
.../drm/i915/selftests/intel_breadcrumbs.c | 8 ++--
.../gpu/drm/i915/selftests/intel_hangcheck.c | 6 +--
15 files changed, 78 insertions(+), 67 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f27d6dcb2f2d..b2d34a1ca24a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1351,8 +1351,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
- seq_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
+ seq_printf(m, "\t%s [%d] waiting for %d:%d\n",
+ w->tsk->comm, w->tsk->pid,
+ upper_32_bits(w->global_seqno),
+ lower_32_bits(w->global_seqno));
}
spin_unlock_irq(&b->rb_lock);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1f58f52f21f3..06944955608d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3750,7 +3750,7 @@ static inline bool
__i915_request_irq_complete(const struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- u32 seqno;
+ u64 seqno;
/* Note that the engine may have wrapped around the seqno, and
* so our request->global_seqno will be ahead of the hardware,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 7d3161a18b56..294775b882bf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1246,7 +1246,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
strcpy(waiter->comm, w->tsk->comm);
waiter->pid = w->tsk->pid;
- waiter->seqno = w->seqno;
+ waiter->seqno = w->global_seqno;
waiter++;
if (++ee->num_waiters == count)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1f46a07a4532..3d85da8fe3fe 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1179,7 +1179,7 @@ static void notify_ring(struct intel_engine_cs *engine)
* and to handle coalescing of multiple seqno updates
* and many waiters.
*/
- if (i915_seqno_passed(seqno, wait->seqno)) {
+ if (i915_seqno_passed(seqno, wait->global_seqno)) {
struct i915_request *waiter = wait->request;
if (waiter &&
@@ -1191,7 +1191,8 @@ static void notify_ring(struct intel_engine_cs *engine)
tsk = wait->tsk;
} else {
if (engine->irq_seqno_barrier &&
- i915_seqno_passed(seqno, wait->seqno - 1)) {
+ i915_seqno_passed(seqno,
+ lower_32_bits(wait->global_seqno) - 1)) {
set_bit(ENGINE_IRQ_BREADCRUMB,
&engine->irq_posted);
tsk = wait->tsk;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 677fabb5db16..5261a25b5892 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -269,7 +269,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
+ lower_32_bits(rq->global_seqno),
intel_engine_get_seqno(engine));
GEM_BUG_ON(!i915_request_completed(rq));
@@ -331,7 +331,7 @@ static void i915_request_retire(struct i915_request *request)
GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
+ lower_32_bits(request->global_seqno),
intel_engine_get_seqno(request->engine));
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -394,7 +394,7 @@ void i915_request_retire_upto(struct i915_request *rq)
GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
+ lower_32_bits(rq->global_seqno),
intel_engine_get_seqno(rq->engine));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
@@ -449,7 +449,7 @@ void __i915_request_submit(struct i915_request *request)
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- request->global_seqno = seqno;
+ request->global_seqno = (u64)engine->id << 32 | seqno;
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
intel_engine_enable_signaling(request, false);
spin_unlock(&request->lock);
@@ -485,7 +485,7 @@ void __i915_request_unsubmit(struct i915_request *request)
GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
+ lower_32_bits(request->global_seqno),
intel_engine_get_seqno(engine));
GEM_BUG_ON(!irqs_disabled());
@@ -496,7 +496,7 @@ void __i915_request_unsubmit(struct i915_request *request)
* is kept in seqno/ring order.
*/
GEM_BUG_ON(!request->global_seqno);
- GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
+ GEM_BUG_ON(lower_32_bits(request->global_seqno) != engine->timeline.seqno);
GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
engine->timeline.seqno--;
@@ -691,7 +691,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
/* No zalloc, must clear what we need by hand */
rq->global_seqno = 0;
- rq->signaling.wait.seqno = 0;
+ rq->signaling.wait.global_seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
@@ -780,7 +780,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
GEM_BUG_ON(!from->engine->semaphore.signal);
- seqno = i915_request_global_seqno(from);
+ seqno = lower_32_bits(i915_request_global_seqno(from));
if (!seqno)
goto await_dma_fence;
@@ -1113,7 +1113,7 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
}
static bool __i915_spin_request(const struct i915_request *rq,
- u32 seqno, int state, unsigned long timeout_us)
+ u64 seqno, int state, unsigned long timeout_us)
{
struct intel_engine_cs *engine = rq->engine;
unsigned int irq, cpu;
@@ -1265,7 +1265,7 @@ long i915_request_wait(struct i915_request *rq,
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
/* Optimistic short spin before touching IRQs */
- if (__i915_spin_request(rq, wait.seqno, state, 5))
+ if (__i915_spin_request(rq, wait.global_seqno, state, 5))
goto complete;
set_current_state(state);
@@ -1325,7 +1325,7 @@ long i915_request_wait(struct i915_request *rq,
continue;
/* Only spin if we know the GPU is processing this request */
- if (__i915_spin_request(rq, wait.seqno, state, 2))
+ if (__i915_spin_request(rq, wait.global_seqno, state, 2))
break;
if (!intel_wait_check_request(&wait, rq)) {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 68d36eeb5edb..b5ecd4724cec 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -43,7 +43,7 @@ struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
struct i915_request *request;
- u32 seqno;
+ u64 global_seqno;
};
struct intel_signal_node {
@@ -140,7 +140,7 @@ struct i915_request {
* on the HW queue (i.e. not on the engine timeline list).
* Its value is guarded by the timeline spinlock.
*/
- u32 global_seqno;
+ u64 global_seqno;
/** Position in the ring of the start of the request */
u32 head;
@@ -250,9 +250,11 @@ i915_request_put(struct i915_request *rq)
* the global seqno be stable (due to the memory barriers on submitting
* the commands to the hardware to write the breadcrumb, if the HWS shows
* that it has passed the global seqno and the global seqno is unchanged
- * after the read, it is indeed complete).
+ * after the read, it is indeed complete). To compensate for the danger of
+ * preemption onto another ring, the global_seqno encodes a tag into its
+ * upper 32 bits to identify the execution engine.
*/
-static u32
+static u64
i915_request_global_seqno(const struct i915_request *request)
{
return READ_ONCE(request->global_seqno);
@@ -318,7 +320,7 @@ static inline bool i915_request_started(const struct i915_request *rq)
}
static inline bool
-__i915_request_completed(const struct i915_request *rq, u32 seqno)
+__i915_request_completed(const struct i915_request *rq, u64 seqno)
{
GEM_BUG_ON(!seqno);
return intel_engine_has_completed(rq->engine, seqno) &&
@@ -327,7 +329,7 @@ __i915_request_completed(const struct i915_request *rq, u32 seqno)
static inline bool i915_request_completed(const struct i915_request *rq)
{
- u32 seqno;
+ u64 seqno;
seqno = i915_request_global_seqno(rq);
if (!seqno)
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index f36d63e1fc63..f002ef17e859 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -586,7 +586,7 @@ reset_request(struct intel_engine_cs *engine,
if (i915_request_completed(rq)) {
GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
- engine->name, rq->global_seqno,
+ engine->name, lower_32_bits(rq->global_seqno),
rq->fence.context, rq->fence.seqno,
intel_engine_get_seqno(engine));
stalled = false;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d827136..2ff1b747a6a3 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -256,7 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
- GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
+ GEM_BUG_ON(!intel_engine_signaled(engine, wait->global_seqno));
RB_CLEAR_NODE(&wait->node);
wake_up_process(wait->tsk);
}
@@ -394,7 +394,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
bool first, armed;
u32 seqno;
- GEM_BUG_ON(!wait->seqno);
+ GEM_BUG_ON(!wait->global_seqno);
/* Insert the request into the retirement ordered list
* of waiters by walking the rbtree. If we are the oldest
@@ -419,7 +419,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
* current bottom-half handle any pending wakeups and instead
* try and get out of the way quickly.
*/
- if (i915_seqno_passed(seqno, wait->seqno)) {
+ if (i915_seqno_passed(seqno, wait->global_seqno)) {
RB_CLEAR_NODE(&wait->node);
return first;
}
@@ -427,7 +427,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
p = &b->waiters.rb_node;
while (*p) {
parent = *p;
- if (wait->seqno == to_wait(parent)->seqno) {
+ if (wait->global_seqno == to_wait(parent)->global_seqno) {
/* We have multiple waiters on the same seqno, select
* the highest priority task (that with the smallest
* task->prio) to serve as the bottom-half for this
@@ -439,10 +439,11 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
} else {
p = &parent->rb_left;
}
- } else if (i915_seqno_passed(wait->seqno,
- to_wait(parent)->seqno)) {
+ } else if (i915_seqno_passed(wait->global_seqno,
+ to_wait(parent)->global_seqno)) {
p = &parent->rb_right;
- if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
+ if (i915_seqno_passed(seqno,
+ to_wait(parent)->global_seqno))
completed = parent;
else
first = false;
@@ -507,7 +508,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
return armed;
/* Make the caller recheck if its request has already started. */
- return intel_engine_has_started(engine, wait->seqno);
+ return intel_engine_has_started(engine, wait->global_seqno);
}
static inline bool chain_wakeup(struct rb_node *rb, int priority)
@@ -560,7 +561,8 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
*/
u32 seqno = intel_engine_get_seqno(engine);
- while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
+ while (i915_seqno_passed(seqno,
+ to_wait(next)->global_seqno)) {
struct rb_node *n = rb_next(next);
__intel_breadcrumbs_finish(b, to_wait(next));
@@ -642,10 +644,9 @@ static int intel_breadcrumbs_signaler(void *arg)
spin_lock_irq(&b->rb_lock);
list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
- u32 this = rq->signaling.wait.seqno;
-
- GEM_BUG_ON(!rq->signaling.wait.seqno);
+ u64 this = rq->signaling.wait.global_seqno;
+ GEM_BUG_ON(!this);
if (!i915_seqno_passed(seqno, this))
break;
@@ -653,7 +654,7 @@ static int intel_breadcrumbs_signaler(void *arg)
__intel_engine_remove_wait(engine,
&rq->signaling.wait);
- rq->signaling.wait.seqno = 0;
+ rq->signaling.wait.global_seqno = 0;
__list_del_entry(&rq->signaling.link);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
@@ -732,7 +733,7 @@ static void insert_signal(struct intel_breadcrumbs *b,
*/
list_for_each_entry_reverse(iter, &b->signals, signaling.link)
- if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
+ if (i915_seqno_passed(seqno, iter->signaling.wait.global_seqno))
break;
list_add(&request->signaling.link, &iter->signaling.link);
@@ -743,7 +744,7 @@ bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct intel_wait *wait = &request->signaling.wait;
- u32 seqno;
+ u64 seqno;
/*
* Note that we may be called from an interrupt handler on another
@@ -761,10 +762,10 @@ bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
if (!seqno) /* will be enabled later upon execution */
return true;
- GEM_BUG_ON(wait->seqno);
+ GEM_BUG_ON(wait->global_seqno);
wait->tsk = b->signaler;
wait->request = request;
- wait->seqno = seqno;
+ wait->global_seqno = seqno;
/*
* Add ourselves into the list of waiters, but registering our
@@ -796,12 +797,12 @@ void intel_engine_cancel_signaling(struct i915_request *request)
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&request->lock);
- if (!READ_ONCE(request->signaling.wait.seqno))
+ if (!READ_ONCE(request->signaling.wait.global_seqno))
return;
spin_lock(&b->rb_lock);
__intel_engine_remove_wait(engine, &request->signaling.wait);
- if (fetch_and_zero(&request->signaling.wait.seqno))
+ if (fetch_and_zero(&request->signaling.wait.global_seqno))
__list_del_entry(&request->signaling.link);
spin_unlock(&b->rb_lock);
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 77921f86a468..3d53ce41e3c9 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1245,7 +1245,7 @@ static void print_request(struct drm_printer *m,
drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
prefix,
- rq->global_seqno,
+ lower_32_bits(rq->global_seqno),
i915_request_completed(rq) ? "!" : "",
rq->fence.context, rq->fence.seqno,
buf,
@@ -1568,8 +1568,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
- drm_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
+ drm_printf(m, "\t%s [%d] waiting for %d:%d\n",
+ w->tsk->comm, w->tsk->pid,
+ upper_32_bits(w->global_seqno),
+ lower_32_bits(w->global_seqno));
}
spin_unlock(&b->rb_lock);
local_irq_restore(flags);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1ef8074d9854..a8a9231a41fb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -441,7 +441,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
- rq->global_seqno,
+ lower_32_bits(rq->global_seqno),
rq->fence.context, rq->fence.seqno,
intel_engine_get_seqno(engine),
rq_prio(rq));
@@ -734,7 +734,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
- rq->global_seqno,
+ lower_32_bits(rq->global_seqno),
rq->fence.context, rq->fence.seqno,
intel_engine_get_seqno(rq->engine));
@@ -943,7 +943,7 @@ static void process_csb(struct intel_engine_cs *engine)
GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
engine->name,
port->context_id, count,
- rq ? rq->global_seqno : 0,
+ rq ? lower_32_bits(rq->global_seqno) : 0,
rq ? rq->fence.context : 0,
rq ? rq->fence.seqno : 0,
intel_engine_get_seqno(engine),
@@ -1739,7 +1739,8 @@ static void execlists_reset(struct intel_engine_cs *engine,
u32 *regs;
GEM_TRACE("%s request global=%d, current=%d\n",
- engine->name, request ? request->global_seqno : 0,
+ engine->name,
+ request ? lower_32_bits(request->global_seqno) : 0,
intel_engine_get_seqno(engine));
spin_lock_irqsave(&engine->timeline.lock, flags);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b8a7a014d46d..93273821e4fb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -575,7 +575,8 @@ static void skip_request(struct i915_request *rq)
static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{
GEM_TRACE("%s request global=%d, current=%d\n",
- engine->name, rq ? rq->global_seqno : 0,
+ engine->name,
+ rq ? lower_32_bits(rq->global_seqno) : 0,
intel_engine_get_seqno(engine));
/*
@@ -765,7 +766,7 @@ gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- *cs++ = signal->global_seqno - 1;
+ *cs++ = lower_32_bits(signal->global_seqno) - 1;
*cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f6ec48a75a69..fb702e649a39 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -992,21 +992,21 @@ static inline void intel_wait_init(struct intel_wait *wait)
wait->request = NULL;
}
-static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
+static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u64 seqno)
{
wait->tsk = current;
- wait->seqno = seqno;
+ wait->global_seqno = seqno;
}
static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
- return wait->seqno;
+ return wait->global_seqno;
}
static inline bool
-intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
+intel_wait_update_seqno(struct intel_wait *wait, u64 seqno)
{
- wait->seqno = seqno;
+ wait->global_seqno = seqno;
return intel_wait_has_seqno(wait);
}
@@ -1018,9 +1018,9 @@ intel_wait_update_request(struct intel_wait *wait,
}
static inline bool
-intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
+intel_wait_check_seqno(const struct intel_wait *wait, u64 seqno)
{
- return wait->seqno == seqno;
+ return wait->global_seqno == seqno;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 8b73a8c21377..9553b9336ce3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -223,7 +223,8 @@ static int igt_request_rewind(void *arg)
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
- vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+ lower_32_bits(vip->global_seqno),
+ intel_engine_get_seqno(i915->engine[RCS]));
goto err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index f03b407fdbe2..8bd05a99340d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -49,13 +49,13 @@ static int check_rbtree(struct intel_engine_cs *engine,
if (!test_bit(idx, bitmap)) {
pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n",
- idx, w->seqno);
+ idx, lower_32_bits(w->global_seqno));
return -EINVAL;
}
if (n != idx) {
pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n",
- idx, w->seqno, n);
+ idx, lower_32_bits(w->global_seqno), n);
return -EINVAL;
}
@@ -77,7 +77,7 @@ static int check_completion(struct intel_engine_cs *engine,
continue;
pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n",
- n, waiters[n].seqno,
+ n, lower_32_bits(waiters[n].global_seqno),
intel_wait_complete(&waiters[n]) ? "complete" : "active",
test_bit(n, bitmap) ? "active" : "complete");
return -EINVAL;
@@ -217,7 +217,7 @@ static int igt_insert_complete(void *arg)
if (intel_wait_complete(&waiters[n])) {
pr_err("waiter[%d, seqno=%d] completed too early\n",
- n, waiters[n].seqno);
+ n, lower_32_bits(waiters[n].global_seqno));
err = -EINVAL;
goto out_bitmap;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 7966b9538288..db271722b274 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -489,7 +489,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
}
GEM_BUG_ON(!rq->global_seqno);
- seqno = rq->global_seqno - 1;
+ seqno = lower_32_bits(rq->global_seqno) - 1;
i915_request_put(rq);
}
@@ -583,7 +583,7 @@ static int active_request_put(struct i915_request *rq)
rq->engine->name,
rq->fence.context,
rq->fence.seqno,
- i915_request_global_seqno(rq));
+ lower_32_bits(i915_request_global_seqno(rq)));
GEM_TRACE_DUMP();
i915_gem_set_wedged(rq->i915);
@@ -767,7 +767,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
GEM_BUG_ON(!rq->global_seqno);
- seqno = rq->global_seqno - 1;
+ seqno = lower_32_bits(rq->global_seqno) - 1;
}
err = i915_reset_engine(engine, NULL);
--
2.19.1
More information about the Intel-gfx-trybot
mailing list