[Intel-gfx] [RFC 31/37] drm/i915/preempt: scheduler logic for landing preemptive requests
John.C.Harrison at Intel.com
Mon Nov 23 03:42:06 PST 2015
From: Dave Gordon <david.s.gordon at intel.com>
This patch adds the GEM & scheduler logic for detection and first-stage
processing of completed preemption requests. Similar to regular batches,
they deposit their sequence number in the hardware status page when
starting and again when finished, but using different locations so that
information pertaining to a preempted batch is not overwritten. Also,
the in-progress flag is not cleared by the GPU at the end of the batch;
instead, driver software is responsible for clearing it once the
request completion has been noticed.
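
As a rough illustration of that handshake (this is not driver code: the
status page is modelled as a plain array and the slot indices are
placeholders for the real I915_PREEMPTIVE_*_SEQNO offsets), detecting a
completed preemptive request amounts to comparing the ACTIVE and DONE
slots, with software clearing ACTIVE once it has acknowledged the event:

	#include <stdint.h>
	#include <stdbool.h>

	/* Illustrative slot indices only; the real offsets are defined elsewhere */
	enum {
		PREEMPTIVE_ACTIVE_SLOT = 0,
		PREEMPTIVE_DONE_SLOT   = 1,
	};

	/*
	 * The GPU writes the seqno to ACTIVE when the preemptive batch starts
	 * and to DONE when it finishes; only software clears ACTIVE back to 0.
	 */
	static bool preemption_completed(const uint32_t *status_page)
	{
		uint32_t active = status_page[PREEMPTIVE_ACTIVE_SLOT];
		uint32_t done   = status_page[PREEMPTIVE_DONE_SLOT];

		/* Nothing preemptive has started since the last acknowledgement */
		if (!active)
			return false;

		/* A mismatch means the preemptive batch is still in flight */
		return done == active;
	}

	static void acknowledge_preemption(uint32_t *status_page)
	{
		/* Software, not the GPU, clears the in-progress marker */
		status_page[PREEMPTIVE_ACTIVE_SLOT] = 0;
	}

This is the same ACTIVE/DONE comparison that i915_gem_request_notify()
performs below, once before and once after taking the fence lock.
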
Actually-preemptive requests are still disabled via a module parameter
at this early stage, as the rest of the logic to deal with the
consequences of preemption isn't in place yet.
For: VIZ-2021
Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
---
drivers/gpu/drm/i915/i915_gem.c | 68 ++++++++++++++++++++++++++++++++--
drivers/gpu/drm/i915/i915_scheduler.c | 70 ++++++++++++++++++++++++++---------
drivers/gpu/drm/i915/i915_scheduler.h | 3 +-
3 files changed, 120 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 55317b1ca..48a57c0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2493,6 +2493,14 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
ring->last_irq_seqno = 0;
}
+ /* Also reset sw batch tracking state */
+ for_each_ring(ring, dev_priv, i) {
+ intel_write_status_page(ring, I915_BATCH_DONE_SEQNO, 0);
+ intel_write_status_page(ring, I915_BATCH_ACTIVE_SEQNO, 0);
+ intel_write_status_page(ring, I915_PREEMPTIVE_DONE_SEQNO, 0);
+ intel_write_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO, 0);
+ }
+
return 0;
}
@@ -2829,6 +2837,7 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
struct drm_i915_gem_request *req, *req_next;
unsigned long flags;
bool wake_sched = false;
+ u32 preempt;
u32 seqno;
if (list_empty(&ring->fence_signal_list)) {
@@ -2836,9 +2845,23 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
return;
}
- seqno = ring->get_seqno(ring, false);
+ seqno = ring->get_seqno(ring, false);
+ preempt = i915_scheduler_is_ring_preempting(ring) ?
+ intel_read_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO) : 0;
trace_i915_gem_request_notify(ring, seqno);
- if (seqno == ring->last_irq_seqno)
+
+ if (preempt) {
+ u32 preempt_done;
+
+ preempt_done = intel_read_status_page(ring, I915_PREEMPTIVE_DONE_SEQNO);
+
+ /* A mismatch indicates an in-progress operation so ignore it for now */
+ if (preempt_done != preempt)
+ preempt = 0;
+ }
+
+ /* Is there anything new to process? */
+ if ((seqno == ring->last_irq_seqno) && !preempt)
return;
ring->last_irq_seqno = seqno;
@@ -2866,7 +2889,7 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
* and call scheduler_clean() while the scheduler
* thinks it is still active.
*/
- wake_sched |= i915_scheduler_notify_request(req);
+ wake_sched |= i915_scheduler_notify_request(req, false);
if (!req->cancelled) {
fence_signal_locked(&req->fence);
@@ -2882,6 +2905,45 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
list_add_tail(&req->unsignal_link, &ring->fence_unsignal_list);
}
+ /*
+ * Note that doing the pre-emption seqno check before acquiring the
+ * spinlock means that there could be multiple request_notify() calls
+ * attempting to process preemption concurrently. The advantage is
+ * not needing to grab the spinlock when there is nothing to do.
+ * The disadvantage is needing to re-check whether the preemption
+ * event has already been processed.
+ */
+ if (preempt) {
+ u32 preempt_done;
+
+ preempt = intel_read_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO);
+ preempt_done = intel_read_status_page(ring, I915_PREEMPTIVE_DONE_SEQNO);
+
+ /* A mismatch indicates an in-progress operation so ignore it for now */
+ if (preempt_done != preempt)
+ preempt = 0;
+ }
+
+ /* If a (completed) preemption has occurred then process it now. */
+ if (preempt) {
+ bool sched_ack = false;
+
+ list_for_each_entry_safe(req, req_next, &ring->fence_signal_list, signal_link) {
+ if (req->seqno == preempt) {
+ /* De-list and notify the scheduler, but don't signal yet */
+ list_del_init(&req->signal_link);
+ sched_ack = i915_scheduler_notify_request(req, true);
+ break;
+ }
+ }
+
+ WARN_ON(!sched_ack);
+ wake_sched = true;
+
+ /* Acknowledge/clear preemption-active flag */
+ intel_write_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO, 0);
+ }
+
if (!fence_locked)
spin_unlock_irqrestore(&ring->fence_lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index a037ba2..0c2344e 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -558,40 +558,71 @@ static void i915_scheduler_node_kill(struct i915_scheduler *scheduler,
* then i915_scheduler_wakeup() is called so the scheduler can do further
* processing (submit more work) at the end.
*/
-bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
+bool i915_scheduler_notify_request(struct drm_i915_gem_request *req,
+ bool preempt)
{
- struct drm_i915_private *dev_priv = to_i915(req->ring->dev);
- struct i915_scheduler *scheduler = dev_priv->scheduler;
+ struct drm_i915_private *dev_priv = req->i915;
+ struct i915_scheduler *scheduler = dev_priv->scheduler;
struct i915_scheduler_queue_entry *node = req->scheduler_qe;
- unsigned long flags;
+ uint32_t ring_id = req->ring->id;
+ unsigned long flags;
+ bool result;
trace_i915_scheduler_landing(req);
- if (!node)
- return false;
-
spin_lock_irqsave(&scheduler->lock, flags);
- WARN_ON(!I915_SQS_IS_FLYING(node));
-
- /* Node was in flight so mark it as complete. */
- if (req->cancelled) {
+ if (!node) {
+ /* Untracked request, presumably ring init */
+ WARN_ON(preempt);
+ WARN_ON(!(req->scheduler_flags & i915_req_sf_untracked));
+ scheduler->stats[ring_id].non_batch_done++;
+ result = false;
+ } else if (WARN(!I915_SQS_IS_FLYING(node), "Node not flying: %d:%d -> %s! [preempt = %d]\n",
+ req->uniq, req->seqno,
+ i915_scheduler_queue_status_str(node->status), preempt)) {
+ /* This shouldn't happen */
+ result = false;
+ } else if (req->cancelled) {
/* If a preemption was in progress, it won't complete now. */
+ /* FIXME: do we need to clear I915_PREEMPTIVE_ACTIVE_SEQNO here? */
if (node->status == i915_sqs_overtaking)
scheduler->flags[req->ring->id] &= ~(i915_sf_preempting|i915_sf_preempted);
node->status = i915_sqs_dead;
scheduler->stats[req->ring->id].kill_flying++;
- } else {
+ result = true;
+ } else if (node->status == i915_sqs_flying) {
+ WARN(preempt, "Got flying node with preemption!\n");
+
+ /* Node was in flight so mark it as complete. */
node->status = i915_sqs_complete;
- scheduler->stats[req->ring->id].completed++;
+ scheduler->stats[ring_id].completed++;
+ result = true;
+ } else if (node->status == i915_sqs_overtaking) {
+ WARN(!preempt, "Got overtaking node without preemption!\n");
+
+ /* Preempting request has completed & becomes preempted */
+ node->status = i915_sqs_preempted;
+ trace_i915_scheduler_unfly(node->params.ring, node);
+
+ /* Scheduler is now in post-preemption state */
+ scheduler->flags[ring_id] |= i915_sf_preempted;
+ scheduler->stats[ring_id].preempts_completed++;
+ result = true;
+ } else {
+ WARN(true, "Unknown node state: %s [%s]!\n",
+ i915_scheduler_queue_status_str(node->status),
+ preempt ? "preempting" : "regular");
+ result = false;
}
- trace_i915_scheduler_node_state_change(req->ring, node);
+ if (result)
+ trace_i915_scheduler_node_state_change(req->ring, node);
spin_unlock_irqrestore(&scheduler->lock, flags);
- return true;
+ return result;
}
/*
@@ -894,11 +925,16 @@ static int i915_scheduler_dump_locked(struct intel_engine_cs *ring, const char *
}
if (scheduler->flags[ring->id] & i915_sf_dump_seqno) {
- uint32_t seqno;
+ uint32_t seqno, b_active, b_done, p_active, p_done;
seqno = ring->get_seqno(ring, true);
+ p_done = intel_read_status_page(ring, I915_PREEMPTIVE_DONE_SEQNO);
+ p_active = intel_read_status_page(ring, I915_PREEMPTIVE_ACTIVE_SEQNO);
+ b_done = intel_read_status_page(ring, I915_BATCH_DONE_SEQNO);
+ b_active = intel_read_status_page(ring, I915_BATCH_ACTIVE_SEQNO);
- DRM_DEBUG_DRIVER("<%s> Seqno = %d\n", ring->name, seqno);
+ DRM_DEBUG_DRIVER("<%s> Seqno = %08x, BD = %08x, BA = %08x, PD = %08x, PA = %08x\n",
+ ring->name, seqno, b_done, b_active, p_done, p_active);
}
if (scheduler->flags[ring->id] & i915_sf_dump_details) {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index d5f4af3..2ca5433 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -191,7 +191,8 @@ int i915_scheduler_closefile(struct drm_device *dev,
struct drm_file *file);
void i915_gem_scheduler_clean_node(struct i915_scheduler_queue_entry *node);
int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
-bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
+bool i915_scheduler_notify_request(struct drm_i915_gem_request *req,
+ bool preempt);
void i915_scheduler_wakeup(struct drm_device *dev);
bool i915_scheduler_is_ring_flying(struct intel_engine_cs *ring);
bool i915_scheduler_is_ring_preempting(struct intel_engine_cs *ring);
--
1.9.1