[Intel-gfx] [RFC 31/38] drm/i915/preempt: scheduler logic for landing preemptive requests

John.C.Harrison at Intel.com
Fri Dec 11 06:48:55 PST 2015


From: Dave Gordon <david.s.gordon at intel.com>

This patch adds the GEM & scheduler logic for detection and first-stage
processing of completed preemption requests. As with regular batches,
they deposit their sequence number in the hardware status page when
starting and again when finished, but in different locations so that
information pertaining to a preempted batch is not overwritten. Also,
the in-progress flag is not cleared by the GPU at the end of the batch;
instead, driver software is responsible for clearing it once the
request's completion has been noticed.

Actually-preemptive requests are still disabled via a module parameter
at this early stage, as the rest of the logic to deal with the
consequences of preemption isn't in place yet.
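
The gate itself is just a module-parameter check at request-submission
time; a minimal sketch, with a hypothetical parameter name standing in
for the real one defined elsewhere in this series:

    /* Hypothetical flag: the actual parameter is defined elsewhere */
    if (!i915.enable_preemption)
            return false;   /* treat every request as non-preemptive */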

v2: Re-worked to simplify 'pre-emption in progress' logic.

For: VIZ-2021
Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         | 55 ++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_scheduler.c   | 70 +++++++++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_scheduler.h   |  3 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  1 +
 4 files changed, 107 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 66c9a58..ea3d224 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2489,6 +2489,14 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 		ring->last_irq_seqno = 0;
 	}
 
+	/* Also reset sw batch tracking state */
+	for_each_ring(ring, dev_priv, i) {
+		intel_write_status_page(ring, I915_BATCH_DONE_SEQNO, 0);
+		intel_write_status_page(ring, I915_BATCH_ACTIVE_SEQNO, 0);
+		intel_write_status_page(ring, I915_PREEMPTIVE_DONE_SEQNO, 0);
+		intel_write_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO, 0);
+	}
+
 	return 0;
 }
 
@@ -2831,15 +2839,18 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 		return;
 	}
 
-	seqno = ring->get_seqno(ring, false);
+	seqno   = ring->get_seqno(ring, false);
 	trace_i915_gem_request_notify(ring, seqno);
-	if (seqno == ring->last_irq_seqno)
+
+	/* Is there anything new to process? */
+	if ((seqno == ring->last_irq_seqno) && !i915_scheduler_is_ring_preempting(ring))
 		return;
-	ring->last_irq_seqno = seqno;
 
 	if (!fence_locked)
 		spin_lock_irqsave(&ring->fence_lock, flags);
 
+	ring->last_irq_seqno = seqno;
+
 	list_for_each_entry_safe(req, req_next, &ring->fence_signal_list, signal_link) {
 		if (!req->cancelled) {
 			/* How can this happen? */
@@ -2861,7 +2872,7 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 		 * and call scheduler_clean() while the scheduler
 		 * thinks it is still active.
 		 */
-		wake_sched |= i915_scheduler_notify_request(req);
+		wake_sched |= i915_scheduler_notify_request(req, false);
 
 		if (!req->cancelled) {
 			fence_signal_locked(&req->fence);
@@ -2877,6 +2888,42 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 		list_add_tail(&req->unsignal_link, &ring->fence_unsignal_list);
 	}
 
+	if (i915_scheduler_is_ring_preempting(ring)) {
+		u32 preempt_start, preempt_done;
+
+		preempt_start = intel_read_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO);
+		preempt_done = intel_read_status_page(ring, I915_PREEMPTIVE_DONE_SEQNO);
+
+		/*
+		 * A completed preemption leaves both ACTIVE and DONE set to the same
+		 * seqno.  If we find ACTIVE set but DONE is different, the preemption
+		 * has started but not yet completed, so leave it until next time.
+		 * After successfully processing a preemption request, we clear ACTIVE
+		 * below to ensure we don't see it again.
+		 */
+		if (preempt_start && preempt_done == preempt_start) {
+			bool sched_ack = false;
+
+			list_for_each_entry_safe(req, req_next, &ring->fence_signal_list, signal_link) {
+				if (req->seqno == preempt_done) {
+					/* De-list and notify the scheduler, but don't signal yet */
+					list_del_init(&req->signal_link);
+					sched_ack = i915_scheduler_notify_request(req, true);
+					break;
+				}
+			}
+
+			WARN_ON(!sched_ack);
+			wake_sched = true;
+
+			/* Capture BATCH ACTIVE to determine whether a batch was in progress when preempted */
+			ring->last_batch_start = intel_read_status_page(ring, I915_BATCH_ACTIVE_SEQNO);
+
+			/* Acknowledge/clear preemption-active flag */
+		intel_write_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO, 0);
+		}
+	}
+
 	if (!fence_locked)
 		spin_unlock_irqrestore(&ring->fence_lock, flags);
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 590731d..54b6c32 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -584,40 +584,71 @@ static void i915_scheduler_node_kill(struct i915_scheduler *scheduler,
  * then i915_scheduler_wakeup() is called so the scheduler can do further
  * processing (submit more work) at the end.
  */
-bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
+bool i915_scheduler_notify_request(struct drm_i915_gem_request *req,
+				   bool preempt)
 {
-	struct drm_i915_private *dev_priv  = to_i915(req->ring->dev);
-	struct i915_scheduler   *scheduler = dev_priv->scheduler;
+	struct drm_i915_private *dev_priv  = req->i915;
+	struct i915_scheduler   *scheduler = dev_priv->scheduler;
 	struct i915_scheduler_queue_entry *node = req->scheduler_qe;
-	unsigned long       flags;
+	uint32_t ring_id = req->ring->id;
+	unsigned long flags;
+	bool result;
 
 	trace_i915_scheduler_landing(req);
 
-	if (!node)
-		return false;
-
 	spin_lock_irqsave(&scheduler->lock, flags);
 
-	WARN_ON(!I915_SQS_IS_FLYING(node));
-
-	/* Node was in flight so mark it as complete. */
-	if (req->cancelled) {
+	if (!node) {
+		/* Untracked request, presumably ring init */
+		WARN_ON(preempt);
+		WARN_ON(!(req->scheduler_flags & i915_req_sf_untracked));
+		scheduler->stats[ring_id].non_batch_done++;
+		result = false;
+	} else if (WARN(!I915_SQS_IS_FLYING(node), "Node not flying: %d:%d -> %s! [preempt = %d]\n",
+			req->uniq, req->seqno,
+			i915_scheduler_queue_status_str(node->status), preempt)) {
+		/* This shouldn't happen */
+		result = false;
+	} else if (req->cancelled) {
 		/* If a preemption was in progress, it won't complete now. */
+		/* FIXME: may need to clear I915_PREEMPTIVE_ACTIVE_SEQNO here */
 		if (node->status == i915_sqs_overtaking)
 			scheduler->flags[req->ring->id] &= ~(i915_sf_preempting|i915_sf_preempted);
 
 		node->status = i915_sqs_dead;
 		scheduler->stats[req->ring->id].kill_flying++;
-	} else {
+		result = true;
+	} else if (node->status == i915_sqs_flying) {
+		WARN(preempt, "Got flying node with preemption!\n");
+
+		/* Node was in flight so mark it as complete. */
 		node->status = i915_sqs_complete;
-		scheduler->stats[req->ring->id].completed++;
+		scheduler->stats[ring_id].completed++;
+		result = true;
+	} else if (node->status == i915_sqs_overtaking) {
+		WARN(!preempt, "Got overtaking node without preemption!\n");
+
+		/* Preempting request has completed & becomes preempted */
+		node->status = i915_sqs_preempted;
+		trace_i915_scheduler_unfly(node->params.ring, node);
+
+		/* Scheduler is now in post-preemption state */
+		scheduler->flags[ring_id] |= i915_sf_preempted;
+		scheduler->stats[ring_id].preempts_completed++;
+		result = true;
+	} else {
+		WARN(true, "Unknown node state: %s [%s]!\n",
+		     i915_scheduler_queue_status_str(node->status),
+		     preempt ? "preempting" : "regular");
+		result = false;
 	}
 
-	trace_i915_scheduler_node_state_change(req->ring, node);
+	if (result)
+		trace_i915_scheduler_node_state_change(req->ring, node);
 
 	spin_unlock_irqrestore(&scheduler->lock, flags);
 
-	return true;
+	return result;
 }
 
 /*
@@ -923,11 +954,16 @@ static int i915_scheduler_dump_locked(struct intel_engine_cs *ring, const char *
 	}
 
 	if (scheduler->flags[ring->id] & i915_sf_dump_seqno) {
-		uint32_t    seqno;
+		uint32_t    seqno, b_active, b_done, p_active, p_done;
 
 		seqno    = ring->get_seqno(ring, true);
+		p_done   = intel_read_status_page(ring, I915_PREEMPTIVE_DONE_SEQNO);
+		p_active = intel_read_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO);
+		b_done   = intel_read_status_page(ring, I915_BATCH_DONE_SEQNO);
+		b_active = intel_read_status_page(ring, I915_BATCH_ACTIVE_SEQNO);
 
-		DRM_DEBUG_DRIVER("<%s> Seqno = %d\n", ring->name, seqno);
+		DRM_DEBUG_DRIVER("<%s> Seqno = %08x, BD = %08x, BA = %08x, PD = %08x, PA = %08x\n",
+				 ring->name, seqno, b_done, b_active, p_done, p_active);
 	}
 
 	if (scheduler->flags[ring->id] & i915_sf_dump_details) {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 940f25f..5b871b0 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -192,7 +192,8 @@ int         i915_scheduler_closefile(struct drm_device *dev,
 				     struct drm_file *file);
 void        i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
 int         i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
-bool        i915_scheduler_notify_request(struct drm_i915_gem_request *req);
+bool        i915_scheduler_notify_request(struct drm_i915_gem_request *req,
+					  bool preempt);
 void        i915_scheduler_wakeup(struct drm_device *dev);
 bool        i915_scheduler_is_ring_flying(struct intel_engine_cs *ring);
 bool        i915_scheduler_is_ring_preempting(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6bdc1ad..54b5ed6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -377,6 +377,7 @@ struct  intel_engine_cs {
 	struct list_head fence_signal_list;
 	struct list_head fence_unsignal_list;
 	uint32_t last_irq_seqno;
+	uint32_t last_batch_start;
 };
 
 bool intel_ring_initialized(struct intel_engine_cs *ring);
-- 
1.9.1


