[Intel-gfx] [RFC 27/37] drm/i915/preempt: scheduler logic for queueing preemptive requests

John.C.Harrison at Intel.com
Mon Nov 23 03:42:02 PST 2015


From: Dave Gordon <david.s.gordon at intel.com>

This is the very first stage of the scheduler's preemption logic, in
which the scheduler decides, at the point where a request is added to
its queue, whether that request should be marked as potentially
preemptive. Subsequent logic will determine how to handle the request
based on the flags set here.
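
In outline, "marking" a request means setting flag bits on it when it
is queued and, later, flying it in a new queue state; roughly (a
pseudo-C sketch using the names from the diff below, not a verbatim
excerpt):

	/* at queue time, in i915_scheduler_queue_execbuffer() */
	want_preempt = node->priority >= scheduler->priority_level_preempt;
	if (want_preempt) {
		node->params.request->scheduler_flags |=
			i915_req_sf_was_preempt | i915_req_sf_preempt;
		scheduler->stats[ring->id].preempts_queued++;
	}

	/* later, in i915_scheduler_fly_node() */
	if (req->scheduler_flags & i915_req_sf_preempt)
		node->status = i915_sqs_overtaking;	/* instead of i915_sqs_flying */

The new i915_sqs_overtaking and i915_sqs_preempted queue states, and
the 'P'/'p' flag characters in i915_qe_state_str(), exist so that the
debug output can show the preemption-related state of each request.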

Actual preemption is disabled by default at this early stage, via the
scheduler_override module parameter, as the rest of the logic needed to
process preemptive requests isn't in place yet.
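
With the default settings, want_preempt therefore always ends up
false; the gating added in i915_scheduler_queue_execbuffer() looks
roughly like this (again a sketch, names as in the diff below):

	/* scheduler_override is a bitmask; i915_so_no_preemption is now
	 * set by default (the updated parameter description reports the
	 * default value as 4) */
	if (i915.scheduler_override & i915_so_no_preemption)
		want_preempt = false;

	/* pre-emption also requires execlist mode and GuC submission */
	if (!i915.enable_execlists || !i915.enable_guc_submission)
		want_preempt = false;

A request that does survive the gating triggers an immediate call to
i915_scheduler_submit(), in the same way as the existing not_flying
case.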

For: VIZ-2021
Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
---
 drivers/gpu/drm/i915/i915_params.c    |  4 +--
 drivers/gpu/drm/i915/i915_scheduler.c | 53 +++++++++++++++++++++++++++++++----
 2 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7db0f83..72dff7c 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,7 +54,7 @@ struct i915_params i915 __read_mostly = {
 	.edp_vswing = 0,
 	.enable_guc_submission = true,
 	.guc_log_level = -1,
-	.scheduler_override = 0,
+	.scheduler_override = i915_so_no_preemption,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -194,4 +194,4 @@ MODULE_PARM_DESC(guc_log_level,
 	"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
 
 module_param_named(scheduler_override, i915.scheduler_override, int, 0600);
-MODULE_PARM_DESC(scheduler_override, "Scheduler override mask (default: 0)");
+MODULE_PARM_DESC(scheduler_override, "Scheduler override mask (default: 4)");
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 16d067e..50ff8b7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -58,10 +58,13 @@ bool i915_scheduler_is_enabled(struct drm_device *dev)
 
 const char *i915_qe_state_str(struct i915_scheduler_queue_entry *node)
 {
+	uint32_t sched_flags = node->params.request->scheduler_flags;
 	static char	str[50];
 	char		*ptr = str;
 
 	*(ptr++) = node->bumped ? 'B' : '-',
+	*(ptr++) = (sched_flags & i915_req_sf_preempt) ? 'P' : '-';
+	*(ptr++) = (sched_flags & i915_req_sf_was_preempt) ? 'p' : '-';
 	*(ptr++) = i915_gem_request_completed(node->params.request) ? 'C' : '-';
 
 	*ptr = 0;
@@ -84,6 +87,12 @@ char i915_scheduler_queue_status_chr(enum i915_scheduler_queue_status status)
 	case i915_sqs_flying:
 	return 'F';
 
+	case i915_sqs_overtaking:
+	return 'O';
+
+	case i915_sqs_preempted:
+	return 'P';
+
 	case i915_sqs_complete:
 	return 'C';
 
@@ -115,6 +124,12 @@ const char *i915_scheduler_queue_status_str(
 	case i915_sqs_flying:
 	return "Flying";
 
+	case i915_sqs_overtaking:
+	return "Overtaking";
+
+	case i915_sqs_preempted:
+	return "Preempted";
+
 	case i915_sqs_complete:
 	return "Complete";
 
@@ -213,7 +228,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 	struct i915_scheduler_queue_entry  *node;
 	struct i915_scheduler_queue_entry  *test;
 	unsigned long       flags;
-	bool                not_flying, found;
+	bool                not_flying, want_preempt, found;
 	int                 i, j, r;
 	int                 incomplete = 0;
 
@@ -385,6 +400,25 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 		not_flying = i915_scheduler_count_flying(scheduler, ring) <
 							 scheduler->min_flying;
 
+	want_preempt = node->priority >= scheduler->priority_level_preempt;
+
+	if (i915.scheduler_override & i915_so_no_preemption)
+		want_preempt = false;
+
+	/* Pre-emption is not yet implemented in non-execlist mode */
+	if (!i915.enable_execlists)
+		want_preempt = false;
+
+	/* Pre-emption is not yet implemented in non-GUC mode */
+	if (!i915.enable_guc_submission)
+		want_preempt = false;
+
+	if (want_preempt) {
+		node->params.request->scheduler_flags |=
+			i915_req_sf_was_preempt | i915_req_sf_preempt;
+		scheduler->stats[ring->id].preempts_queued++;
+	}
+
 	scheduler->stats[ring->id].queued++;
 
 	trace_i915_scheduler_queue(ring, node);
@@ -392,7 +426,7 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 
 	spin_unlock_irqrestore(&scheduler->lock, flags);
 
-	if (not_flying)
+	if (not_flying || want_preempt)
 		i915_scheduler_submit(ring, true);
 
 	return 0;
@@ -403,19 +437,26 @@ static int i915_scheduler_fly_node(struct i915_scheduler_queue_entry *node)
 	struct drm_i915_private *dev_priv = node->params.dev->dev_private;
 	struct i915_scheduler   *scheduler = dev_priv->scheduler;
 	struct intel_engine_cs  *ring;
+	struct drm_i915_gem_request *req;
 
 	BUG_ON(!scheduler);
 	BUG_ON(!node);
 	BUG_ON(node->status != i915_sqs_popped);
 
 	ring = node->params.ring;
+	req = node->params.request;
 
-	/* Add the node (which should currently be in state none) to the front
-	 * of the queue. This ensure that flying nodes are always held in
-	 * hardware submission order. */
+	/*
+	 * Add the node (which should currently be in state popped)
+	 * to the front of the queue. This ensures that flying nodes
+	 * are always held in hardware submission order.
+	 */
 	list_add(&node->link, &scheduler->node_queue[ring->id]);
 
-	node->status = i915_sqs_flying;
+	if (req->scheduler_flags & i915_req_sf_preempt)
+		node->status = i915_sqs_overtaking;
+	else
+		node->status = i915_sqs_flying;
 
 	trace_i915_scheduler_fly(ring, node);
 	trace_i915_scheduler_node_state_change(ring, node);
-- 
1.9.1


