[Intel-gfx] [PATCH v4 12/38] drm/i915: Added scheduler hook into i915_gem_request_notify()

John.C.Harrison at Intel.com John.C.Harrison at Intel.com
Mon Jan 11 10:42:41 PST 2016


From: John Harrison <John.C.Harrison at Intel.com>

The scheduler needs to know when requests have completed so that it
can keep its own internal state up to date and can submit new requests
to the hardware from its queue.

v2: Updated due to changes in request handling. The operation is now
reversed from before. Rather than the scheduler being in control of
completion events, it is now the request code itself. The scheduler
merely receives a notification event. It can then optionally request
its worker thread be woken up after all completion processing is
complete.

v4: Downgraded a BUG_ON to a WARN_ON as the latter is preferred.

For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h       |  2 ++
 drivers/gpu/drm/i915/i915_gem.c       | 16 ++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.c | 28 ++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_scheduler.h |  1 +
 4 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 38f423b..ac4d44b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2262,6 +2262,8 @@ struct drm_i915_gem_request {
 	/** process identifier submitting this request */
 	struct pid *pid;
 
+	struct i915_scheduler_queue_entry	*scheduler_qe;
+
 	/**
 	 * The ELSP only accepts two elements at a time, so we queue
 	 * context/tail pairs on a given queue (ring->execlist_queue) until the
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d4f1d63..3d109b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2817,6 +2817,7 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 {
 	struct drm_i915_gem_request *req, *req_next;
 	unsigned long flags;
+	bool wake_sched = false;
 	u32 seqno;
 
 	if (list_empty(&ring->fence_signal_list)) {
@@ -2852,6 +2853,14 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 		 */
 		list_del_init(&req->signal_link);
 
+		/*
+		 * NB: Must notify the scheduler before signalling
+		 * the node. Otherwise the node can get retired first
+		 * and call scheduler_clean() while the scheduler
+		 * thinks it is still active.
+		 */
+		wake_sched |= i915_scheduler_notify_request(req);
+
 		if (!req->cancelled) {
 			fence_signal_locked(&req->fence);
 			trace_i915_gem_request_complete(req);
@@ -2868,6 +2877,13 @@ void i915_gem_request_notify(struct intel_engine_cs *ring, bool fence_locked)
 
 	if (!fence_locked)
 		spin_unlock_irqrestore(&ring->fence_lock, flags);
+
+	/* Necessary? Or does the fence_signal() call do an implicit wakeup? */
+	wake_up_all(&ring->irq_queue);
+
+	/* Final scheduler processing after all individual updates are done. */
+	if (wake_sched)
+		i915_scheduler_wakeup(ring->dev);
 }
 
 static const char *i915_gem_request_get_driver_name(struct fence *req_fence)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 26cd088..ac0a6172 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -174,6 +174,9 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 	node->stamp  = jiffies;
 	i915_gem_request_reference(node->params.request);
 
+	WARN_ON(node->params.request->scheduler_qe);
+	node->params.request->scheduler_qe = node;
+
 	/* Need to determine the number of incomplete entries in the list as
 	 * that will be the maximum size of the dependency list.
 	 *
@@ -350,14 +353,16 @@ static void i915_scheduler_node_kill(struct i915_scheduler_queue_entry *node)
  * code has mapped it back to a request and will mark that request complete.
  * It also calls this function to notify the scheduler about the completion
  * so the scheduler's node can be updated appropriately.
- * Returns true if the request is scheduler managed, false if not.
+ * Returns true if the request is scheduler managed, false if not. The return
+ * value is combined for all freshly completed requests and if any were true
+ * then i915_scheduler_wakeup() is called so the scheduler can do further
+ * processing (submit more work) at the end.
  */
 bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
 {
 	struct drm_i915_private *dev_priv  = to_i915(req->ring->dev);
 	struct i915_scheduler   *scheduler = dev_priv->scheduler;
-	/* XXX: Need to map back from request to node */
-	struct i915_scheduler_queue_entry *node = NULL;
+	struct i915_scheduler_queue_entry *node = req->scheduler_qe;
 	unsigned long       flags;
 
 	if (!node)
@@ -375,16 +380,18 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
 
 	spin_unlock_irqrestore(&scheduler->lock, flags);
 
-	/*
-	 * XXX: If the in-flight list is now empty then new work should be
-	 * submitted. However, this function is called from interrupt context
-	 * and thus cannot acquire mutex locks and other such things that are
-	 * necessary for fresh submission.
-	 */
-
 	return true;
 }
 
+/*
+ * Called at the end of seqno interrupt processing if any request has
+ * completed that corresponds to a scheduler node.
+ */
+void i915_scheduler_wakeup(struct drm_device *dev)
+{
+	/* XXX: Need to call i915_scheduler_remove() via work handler. */
+}
+
 int i915_scheduler_remove(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -500,6 +507,7 @@ int i915_scheduler_remove(struct intel_engine_cs *ring)
 			i915_gem_execbuff_release_batch_obj(node->params.batch_obj);
 
 		/* Free everything that is owned by the node: */
+		node->params.request->scheduler_qe = NULL;
 		i915_gem_request_unreference(node->params.request);
 		kfree(node->params.cliprects);
 		kfree(node->dep_list);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 02ac6f2..9f54786 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -90,5 +90,6 @@ int         i915_scheduler_closefile(struct drm_device *dev,
 				     struct drm_file *file);
 int         i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
 bool        i915_scheduler_notify_request(struct drm_i915_gem_request *req);
+void        i915_scheduler_wakeup(struct drm_device *dev);
 
 #endif  /* _I915_SCHEDULER_H_ */
-- 
1.9.1



More information about the Intel-gfx mailing list