[PATCH 25/25] global-seqno

Chris Wilson <chris at chris-wilson.co.uk>
Fri Sep 2 17:19:09 UTC 2016
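
As the hunks below show, this patch splits the per-request fence seqno from
the seqno that is actually written to hardware: each request gains a
global_seqno field, the started/completed helpers, tracepoints, error capture,
GuC submission and all HWS/semaphore emission paths now use it, and the
unprefixed i915_gem_request_started()/completed() helpers treat a zero
global_seqno as "not yet submitted". A minimal userspace sketch of that
zero-guard follows; the names mirror the patch, but the scaffolding around
them is hypothetical and not kernel code:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct request {
          uint32_t global_seqno;  /* 0: no spot on the execution timeline yet */
  };

  static bool seqno_passed(uint32_t seq1, uint32_t seq2)
  {
          /* wrap-safe "seq1 is at or after seq2", as i915_seqno_passed() does */
          return (int32_t)(seq1 - seq2) >= 0;
  }

  /* mirrors __i915_gem_request_completed(): caller knows a global seqno exists */
  static bool __request_completed(const struct request *rq, uint32_t hws_seqno)
  {
          return seqno_passed(hws_seqno, rq->global_seqno);
  }

  /* mirrors i915_gem_request_completed(): safe before the request is submitted */
  static bool request_completed(const struct request *rq, uint32_t hws_seqno)
  {
          if (!rq->global_seqno)
                  return false;   /* no hw seqno yet, so it cannot have completed */

          return __request_completed(rq, hws_seqno);
  }

  int main(void)
  {
          struct request unsubmitted = { .global_seqno = 0 };
          struct request submitted = { .global_seqno = 41 };

          printf("unsubmitted: %d, submitted: %d\n",
                 request_completed(&unsubmitted, 42),   /* 0 */
                 request_completed(&submitted, 42));    /* 1 */
          return 0;
  }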


---
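For reference, a standalone illustration of the wrap-safe ordering that
i915_seqno_passed() provides; the same comparison is used in the hunks below
when checking global_seqno against the HWS value and when sorting signalers
into the rbtree in intel_breadcrumbs.c. The surrounding scaffolding here is
hypothetical, not kernel code:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdint.h>

  static bool seqno_passed(uint32_t seq1, uint32_t seq2)
  {
          /* True when seq1 is at or after seq2; the subtraction is viewed as
           * signed so the comparison stays correct across u32 wraparound,
           * provided the two values are within 2^31 of each other. */
          return (int32_t)(seq1 - seq2) >= 0;
  }

  int main(void)
  {
          assert(seqno_passed(100, 99));                 /* ordinary case */
          assert(!seqno_passed(99, 100));
          assert(seqno_passed(0x00000002, 0xfffffffe));  /* across the wrap */
          assert(!seqno_passed(0xfffffffe, 0x00000002));
          return 0;
  }
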
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            |  4 ++--
 drivers/gpu/drm/i915/i915_gem.c            |  2 +-
 drivers/gpu/drm/i915/i915_gem_request.c    | 10 ++++++----
 drivers/gpu/drm/i915/i915_gem_request.h    | 28 ++++++++++++++++++++++++----
 drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
 drivers/gpu/drm/i915/i915_guc_submission.c |  4 ++--
 drivers/gpu/drm/i915/i915_trace.h          |  8 ++++----
 drivers/gpu/drm/i915/intel_breadcrumbs.c   |  8 +++++---
 drivers/gpu/drm/i915/intel_lrc.c           |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 14 +++++++-------
 11 files changed, 55 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dfbd2c930964..4c9a1bd3fcf7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -675,7 +675,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 			rcu_read_lock();
 			task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
 			seq_printf(m, "    %x @ %d: %s [%d]\n",
-				   req->fence.seqno,
+				   req->global_seqno,
 				   (int) (jiffies - req->emitted_jiffies),
 				   task ? task->comm : "<unknown>",
 				   task ? task->pid : -1);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a906a7875a69..807a97fef27b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3869,7 +3869,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 	/* Before we do the heavier coherent read of the seqno,
 	 * check the value (hopefully) in the CPU cacheline.
 	 */
-	if (i915_gem_request_completed(req))
+	if (__i915_gem_request_completed(req))
 		return true;
 
 	/* Ensure our read of the seqno is coherent so that we
@@ -3920,7 +3920,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 			wake_up_process(tsk);
 		rcu_read_unlock();
 
-		if (i915_gem_request_completed(req))
+		if (__i915_gem_request_completed(req))
 			return true;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 560e390cbd74..ff993217be85 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2656,7 +2656,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
 	list_for_each_entry(request, &engine->timeline->requests, link) {
-		if (i915_gem_request_completed(request))
+		if (__i915_gem_request_completed(request))
 			continue;
 
 		if (!i915_sw_fence_done(&request->submit))
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 073e13cc451a..44cb0eeffd6c 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -351,7 +351,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	/* Move the oldest request to the slab-cache (if not in use!) */
 	req = list_first_entry_or_null(&engine->timeline->requests,
 				       typeof(*req), link);
-	if (req && i915_gem_request_completed(req))
+	if (req && __i915_gem_request_completed(req))
 		i915_gem_request_retire(req);
 
 	/* Beware: Dragons be flying overhead.
@@ -401,6 +401,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->i915 = dev_priv;
 	req->engine = engine;
 	req->timeline = engine->timeline;
+	req->global_seqno = seqno;
 	req->ctx = i915_gem_context_get(ctx);
 
 	/* No zalloc, must clear what we need by hand */
@@ -716,7 +717,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 
 	timeout_us += local_clock_us(&cpu);
 	do {
-		if (i915_gem_request_completed(req))
+		if (__i915_gem_request_completed(req))
 			return true;
 
 		if (signal_pending_state(state, current))
@@ -812,12 +813,13 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		if (timeout < 0)
 			goto complete;
 	}
+	GEM_BUG_ON(!req->global_seqno);
 
 	/* Optimistic spin for the next ~jiffie before touching IRQs */
 	if (i915_spin_request(req, state, 5))
 		goto complete;
 
-	intel_wait_init(&wait, req->fence.seqno);
+	intel_wait_init(&wait, req->global_seqno);
 	if (intel_engine_add_wait(req->engine, &wait))
 		/* In order to check that we haven't missed the interrupt
 		 * as we enabled it, we need to kick ourselves to do a
@@ -887,7 +889,7 @@ static bool engine_retire_requests(struct intel_engine_cs *engine)
 
 	list_for_each_entry_safe(request, next,
 				 &engine->timeline->requests, link) {
-		if (!i915_gem_request_completed(request))
+		if (!__i915_gem_request_completed(request))
 			return false;
 
 		i915_gem_request_retire(request);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index fd7b06e29c6b..413059413221 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -87,6 +87,8 @@ struct drm_i915_gem_request {
 	struct i915_sw_fence submit;
 	wait_queue_t submitq;
 
+	u32 global_seqno;
+
 	/** GEM sequence number associated with the previous request,
 	 * when the HWS breadcrumb is equal to this the GPU is processing
 	 * this request.
@@ -248,17 +250,35 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 }
 
 static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req)
 {
 	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
 				 req->previous_seqno);
 }
 
 static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+i915_gem_request_started(const struct drm_i915_gem_request *req)
+{
+	if (!req->global_seqno)
+		return false;
+
+	return __i915_gem_request_started(req);
+}
+
+static inline bool
+__i915_gem_request_completed(const struct drm_i915_gem_request *req)
 {
 	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 req->fence.seqno);
+				 req->global_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+	if (!req->global_seqno)
+		return false;
+
+	return __i915_gem_request_completed(req);
 }
 
 bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -266,7 +286,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
 static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
 				     int state, unsigned long timeout_us)
 {
-	return (i915_gem_request_started(request) &&
+	return (__i915_gem_request_started(request) &&
 		__i915_spin_request(request, state, timeout_us));
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3dcbc62e8214..42d32d9790d9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1109,7 +1109,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 		}
 
 		erq = &ee->requests[count++];
-		erq->seqno = request->fence.seqno;
+		erq->seqno = request->global_seqno;
 		erq->jiffies = request->emitted_jiffies;
 		erq->head = request->head;
 		erq->tail = request->tail;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 4e7d725bb374..a8152d510af3 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -496,7 +496,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc,
 	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
 
 	wqi->ring_tail = rq->tail >> 3 << WQ_RING_TAIL_SHIFT;
-	wqi->fence_id = rq->fence.seqno;
+	wqi->fence_id = rq->global_seqno;
 
 	kunmap_atomic(base);
 }
@@ -593,7 +593,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 		client->b_fail += 1;
 
 	guc->submissions[engine_id] += 1;
-	guc->last_seqno[engine_id] = rq->fence.seqno;
+	guc->last_seqno[engine_id] = rq->global_seqno;
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 178798002a73..4c46f7c00323 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 			   __entry->dev = from->i915->drm.primary->index;
 			   __entry->sync_from = from->engine->id;
 			   __entry->sync_to = to->engine->id;
-			   __entry->seqno = from->fence.seqno;
+			   __entry->seqno = from->global_seqno;
 			   ),
 
 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -489,7 +489,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   __entry->flags = flags;
 			   fence_enable_sw_signaling(&req->fence);
 			   ),
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   __entry->blocking =
 				     mutex_is_locked(&req->i915->drm.struct_mutex);
 			   ),
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 9bad14d22c95..9ad1028681cf 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -504,9 +504,11 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 
 	/* locked by fence_enable_sw_signaling() */
 	assert_spin_locked(&request->lock);
+	if (!request->global_seqno)
+		return;
 
 	request->signaling.wait.tsk = b->signaler;
-	request->signaling.wait.seqno = request->fence.seqno;
+	request->signaling.wait.seqno = request->global_seqno;
 	i915_gem_request_get(request);
 
 	spin_lock(&b->lock);
@@ -530,8 +532,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	p = &b->signals.rb_node;
 	while (*p) {
 		parent = *p;
-		if (i915_seqno_passed(request->fence.seqno,
-				      to_signaler(parent)->fence.seqno)) {
+		if (i915_seqno_passed(request->global_seqno,
+				      to_signaler(parent)->global_seqno)) {
 			p = &parent->rb_right;
 			first = false;
 		} else {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 16d7cdd11082..00fcf36ba919 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1566,7 +1566,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 			intel_hws_seqno_address(request->engine) |
 			MI_FLUSH_DW_USE_GTT);
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, request->fence.seqno);
+	intel_ring_emit(ring, request->global_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
 	return intel_logical_ring_advance(request);
@@ -1595,7 +1595,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 			 PIPE_CONTROL_QW_WRITE));
 	intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+	intel_ring_emit(ring, request->global_seqno);
 	/* We're thrashing one dword of HWS. */
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a03b21c49caa..34aa60608b63 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1296,7 +1296,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *req)
 				PIPE_CONTROL_CS_STALL);
 		intel_ring_emit(ring, lower_32_bits(gtt_offset));
 		intel_ring_emit(ring, upper_32_bits(gtt_offset));
-		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring, req->global_seqno);
 		intel_ring_emit(ring, 0);
 		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
@@ -1332,7 +1332,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *req)
 				lower_32_bits(gtt_offset) |
 				MI_FLUSH_DW_USE_GTT);
 		intel_ring_emit(ring, upper_32_bits(gtt_offset));
-		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring, req->global_seqno);
 		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1365,7 +1365,7 @@ static int gen6_signal(struct drm_i915_gem_request *req)
 		if (i915_mmio_reg_valid(mbox_reg)) {
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit_reg(ring, mbox_reg);
-			intel_ring_emit(ring, req->fence.seqno);
+			intel_ring_emit(ring, req->global_seqno);
 		}
 	}
 
@@ -1396,7 +1396,7 @@ static int i9xx_emit_request(struct drm_i915_gem_request *req)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, req->global_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
@@ -1446,7 +1446,7 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 			       PIPE_CONTROL_QW_WRITE));
 	intel_ring_emit(ring, intel_hws_seqno_address(engine));
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, req->global_seqno);
 	/* We're thrashing one dword of HWS. */
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
@@ -1484,7 +1484,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
 			MI_SEMAPHORE_WAIT |
 			MI_SEMAPHORE_GLOBAL_GTT |
 			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->fence.seqno);
+	intel_ring_emit(ring, signal->global_seqno);
 	intel_ring_emit(ring, lower_32_bits(offset));
 	intel_ring_emit(ring, upper_32_bits(offset));
 	intel_ring_advance(ring);
@@ -1522,7 +1522,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->fence.seqno - 1);
+	intel_ring_emit(ring, signal->global_seqno - 1);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
-- 
2.9.3


