[Intel-gfx] [PATCH v2 01/11] drm/i915/execlists: Move request unwinding to a separate function
Chris Wilson
chris at chris-wilson.co.uk
Wed Sep 27 16:44:30 UTC 2017
In the future, we will want to unwind requests following a preemption
point. This requires the same steps as for unwinding upon a reset, so
extract the existing code to a separate function for later use.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala at linux.intel.com>
---
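[Editor's illustration, not part of the patch: the commit message says the helper is
extracted for reuse at a future preemption point. A minimal sketch of how such a
caller might look, mirroring what reset_common_ring() does below; execlists_preempt()
is a hypothetical name invented here, the locking and helpers are taken from the patch.]

/*
 * Hypothetical sketch only: a future preemption point could reuse the
 * extracted helper much as the reset path does. Not part of this patch.
 */
static void execlists_preempt(struct intel_engine_cs *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Drop the requests currently in flight on the HW ports... */
	execlist_cancel_port_requests(&engine->execlists);

	/* ...and return the incomplete ones to their priority lists for replay. */
	unwind_incomplete_requests(engine);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}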
drivers/gpu/drm/i915/intel_lrc.c | 54 +++++++++++++++++++++++++---------------
1 file changed, 34 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 61cac26a8b05..d67907a7e8e6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -348,6 +348,38 @@ lookup_priolist(struct intel_engine_cs *engine,
 	return ptr_pack_bits(p, first, 1);
 }
 
+static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+{
+	rq->tail = intel_ring_wrap(rq->ring,
+				   rq->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
+	assert_ring_tail_valid(rq->ring, rq->tail);
+}
+
+static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *rq, *rn;
+
+	lockdep_assert_held(&engine->timeline->lock);
+
+	list_for_each_entry_safe_reverse(rq, rn,
+					 &engine->timeline->requests,
+					 link) {
+		struct i915_priolist *p;
+
+		if (i915_gem_request_completed(rq))
+			return;
+
+		__i915_gem_request_unsubmit(rq);
+		unwind_wa_tail(rq);
+
+		p = lookup_priolist(engine,
+				    &rq->priotree,
+				    rq->priotree.priority);
+		list_add(&rq->priotree.link,
+			 &ptr_mask_bits(p, 1)->requests);
+	}
+}
+
 static inline void
 execlists_context_status_change(struct drm_i915_gem_request *rq,
 				unsigned long status)
@@ -1382,7 +1414,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 				      struct drm_i915_gem_request *request)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct drm_i915_gem_request *rq, *rn;
 	struct intel_context *ce;
 	unsigned long flags;
 
@@ -1400,21 +1431,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	execlist_cancel_port_requests(execlists);
 
 	/* Push back any incomplete requests for replay after the reset. */
-	list_for_each_entry_safe_reverse(rq, rn,
-					 &engine->timeline->requests, link) {
-		struct i915_priolist *p;
-
-		if (i915_gem_request_completed(rq))
-			break;
-
-		__i915_gem_request_unsubmit(rq);
-
-		p = lookup_priolist(engine,
-				    &rq->priotree,
-				    rq->priotree.priority);
-		list_add(&rq->priotree.link,
-			 &ptr_mask_bits(p, 1)->requests);
-	}
+	unwind_incomplete_requests(engine);
 
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 
@@ -1451,10 +1468,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	intel_ring_update_space(request->ring);
 
 	/* Reset WaIdleLiteRestore:bdw,skl as well */
-	request->tail =
-		intel_ring_wrap(request->ring,
-				request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
-	assert_ring_tail_valid(request->ring, request->tail);
+	unwind_wa_tail(request);
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
--
2.14.1