[Intel-gfx] [RFC 08/39] drm/i915: Prepare retire_requests to handle out-of-order seqnos

John.C.Harrison at Intel.com
Fri Jul 17 07:33:17 PDT 2015


From: John Harrison <John.C.Harrison at Intel.com>

A major point of the GPU scheduler is that it re-orders batch buffers after they
have been submitted to the driver. This means that requests can complete out of
order, so the retire processing can no longer assume that all completed entries
are at the front of the list. Rather than attempting to re-sort the request list
on a regular basis, it is simpler to scan the entire list.
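
Purely as an illustration of the iteration change (not part of the patch, and
using a hypothetical demo_request type rather than the driver's structures),
the idea is to replace an early-exit walk with a full scan using the _safe
list iterator, which caches the next pointer so entries can be moved off the
list while iterating:

#include <linux/list.h>

/* Hypothetical stand-in for drm_i915_gem_request. */
struct demo_request {
	struct list_head link;
	bool completed;
};

static void demo_scan_all(struct list_head *request_list,
			  struct list_head *done)
{
	struct demo_request *req, *req_next;

	/*
	 * With in-order completion the walk could stop at the first
	 * incomplete entry. With re-ordering, completed entries may sit
	 * anywhere in the list, so every entry must be examined.
	 */
	list_for_each_entry_safe(req, req_next, request_list, link) {
		if (!req->completed)
			continue;	/* later entries may still be complete */
		list_move_tail(&req->link, done);
	}
}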

There is also a problem with freeing a request before its objects have been
moved to the inactive list. The requests are therefore moved to a temporary list
first, then the objects are de-activated, and finally the requests on the
temporary list are freed.
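
Again only a sketch of the required ordering, building on the hypothetical
demo_request/demo_scan_all example above (none of these names are the
driver's API): completed requests are first parked on a local list, the
objects are de-activated while the requests are still allocated, and only
then are the requests freed:

#include <linux/list.h>
#include <linux/slab.h>

static void demo_retire(struct list_head *request_list)
{
	struct demo_request *req, *req_next;
	LIST_HEAD(deferred_free);	/* temporary holding list */

	/* Step 1: pull every completed request off the main list. */
	demo_scan_all(request_list, &deferred_free);

	/* Step 2: de-activate objects here, while the requests they
	 * reference are still valid.
	 */

	/* Step 3: now it is safe to actually free the requests
	 * (assumes the demo requests were kmalloc()ed).
	 */
	list_for_each_entry_safe(req, req_next, &deferred_free, link) {
		list_del(&req->link);
		kfree(req);
	}
}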

Change-Id: I7eb6793581d9d28eb832e0e94c116b7202fa1b26
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 54 +++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3fbc6ec..56405cd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3171,6 +3171,10 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
+	struct drm_i915_gem_object *obj, *obj_next;
+	struct drm_i915_gem_request *req, *req_next;
+	LIST_HEAD(deferred_request_free);
+
 	WARN_ON(i915_verify_lists(ring->dev));
 
 	/*
@@ -3180,37 +3184,31 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	 */
 	i915_gem_request_notify(ring);
 
+	/*
+	 * Note that request entries might be out of order due to rescheduling
+	 * and pre-emption. Thus both lists must be processed in their entirety
+	 * rather than stopping at the first non-complete entry.
+	 */
+
 	/* Retire requests first as we use it above for the early return.
 	 * If we retire requests last, we may use a later seqno and so clear
 	 * the requests lists without clearing the active list, leading to
 	 * confusion.
 	 */
-	while (!list_empty(&ring->request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&ring->request_list,
-					   struct drm_i915_gem_request,
-					   list);
-
-		if (!i915_gem_request_completed(request))
-			break;
+	list_for_each_entry_safe(req, req_next, &ring->request_list, list) {
+		if (!i915_gem_request_completed(req))
+			continue;
 
-		i915_gem_request_retire(request);
+		list_move_tail(&req->list, &deferred_request_free);
 	}
 
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
 	 */
-	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&ring->active_list,
-				      struct drm_i915_gem_object,
-				      ring_list[ring->id]);
-
+	list_for_each_entry_safe(obj, obj_next, &ring->active_list, ring_list[ring->id]) {
 		if (!list_empty(&obj->last_read_req[ring->id]->list))
-			break;
+			continue;
 
 		i915_gem_object_retire__read(obj, ring->id);
 	}
@@ -3222,18 +3220,26 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	}
 
 	while (!list_empty(&ring->delayed_free_list)) {
-		struct drm_i915_gem_request *request;
 		unsigned long flags;
 
-		request = list_first_entry(&ring->delayed_free_list,
-					   struct drm_i915_gem_request,
-					   delay_free_list);
+		req = list_first_entry(&ring->delayed_free_list,
+				       struct drm_i915_gem_request,
+				       delay_free_list);
 
 		spin_lock_irqsave(&ring->delayed_free_lock, flags);
-		list_del(&request->delay_free_list);
+		list_del(&req->delay_free_list);
 		spin_unlock_irqrestore(&ring->delayed_free_lock, flags);
 
-		i915_gem_request_free(request);
+		i915_gem_request_free(req);
+	}
+
+	/* It should now be safe to actually free the requests */
+	while (!list_empty(&deferred_request_free)) {
+		req = list_first_entry(&deferred_request_free,
+				       struct drm_i915_gem_request,
+				       list);
+
+		i915_gem_request_retire(req);
 	}
 
 	WARN_ON(i915_verify_lists(ring->dev));
-- 
1.9.1


