[Intel-gfx] [RFC 33/37] drm/i915/preempt: Refactor intel_lr_context_reset()

John.C.Harrison at Intel.com
Mon Nov 23 03:42:08 PST 2015


From: Dave Gordon <david.s.gordon at intel.com>

After preemption, we need to empty out the ringbuffers associated
with preempted requests, so that the scheduler has a clean ring
into which to (re-)insert requests (not necessarily in the same
order as before they were preempted).
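
A ring is empty when its 'head' equals its 'tail', so emptying one
comes down to a couple of assignments plus mirroring the new values
into the saved context image. A minimal sketch, using only the fields
and registers that appear in the diff below (locking and error
handling omitted):

	ringbuf->head = ringbuf->tail;		/* ring now appears empty */
	intel_ring_update_space(ringbuf);	/* whole ring writable again */

	/*
	 * Keep the context image in sync, otherwise the next context
	 * restore would reload the stale head/tail values.
	 */
	reg_state[CTX_RING_HEAD+1] = ringbuf->head;
	reg_state[CTX_RING_TAIL+1] = ringbuf->tail;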

So this patch refactors the existing intel_lr_context_reset() into a
new inner core, intel_lr_context_resync(), which updates the
ringbuffer and associated context image for a single engine, and an
outer wrapper which implements the original (reset every engine)
operation of intel_lr_context_reset() in terms of resync().
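
With this split, a hypothetical preemption-completion path in the
scheduler could resync just the affected engine instead of resetting
everything. A minimal sketch (the function name and the choice of
rezero=false here are illustrative assumptions, not part of this
patch):

	/* Hypothetical caller: the engine has acknowledged preemption */
	static void sched_preemption_done(struct intel_context *ctx,
					  struct intel_engine_cs *ring)
	{
		/*
		 * Snap head forward to tail so the preempted (but not
		 * yet executed) requests vanish from the ring; the
		 * scheduler can then re-emit them in whatever order it
		 * picks. Passing true instead would rezero the ring.
		 */
		intel_lr_context_resync(ctx, ring, false);
	}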

For: VIZ-2021
Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c        | 76 +++++++++++++++++++++------------
 drivers/gpu/drm/i915/intel_lrc.h        |  6 ++-
 3 files changed, 54 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a4dda23..3726cd8 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -341,7 +341,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 		struct intel_context *ctx;
 
 		list_for_each_entry(ctx, &dev_priv->context_list, link) {
-			intel_lr_context_reset(dev, ctx);
+			intel_lr_context_reset(ctx);
 		}
 
 		return;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1ccb50d..e00d9385 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2669,39 +2669,61 @@ error_pm:
 	return ret;
 }
 
-void intel_lr_context_reset(struct drm_device *dev,
-			struct intel_context *ctx)
+/*
+ * Empty the ringbuffer associated with the specified context and
+ * engine by updating the ringbuffer 'head' to the value of 'tail', or,
+ * if 'rezero' is true, setting both 'head' and 'tail' to zero.
+ * Then propagate the change to the associated context image.
+ */
+void intel_lr_context_resync(struct intel_context *ctx,
+			     struct intel_engine_cs *ring,
+			     bool rezero)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int i;
+	enum intel_ring_id ring_id = ring->id;
+	struct drm_i915_gem_object *ctx_obj;
+	struct intel_ringbuffer *ringbuf;
+	struct page *page;
+	uint32_t *reg_state;
 
-	for_each_ring(ring, dev_priv, i) {
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
-		struct intel_ringbuffer *ringbuf =
-				ctx->engine[ring->id].ringbuf;
-		uint32_t *reg_state;
-		struct page *page;
+	ctx_obj = ctx->engine[ring_id].state;
+	ringbuf = ctx->engine[ring_id].ringbuf;
 
-		if (!ctx_obj)
-			continue;
+	/*
+	 * When resetting, a hardware context might be as-yet-unused
+	 * and therefore not-yet-allocated. In other situations, the
+	 * ringbuffer and context object must already exist.
+	 */
+	if (WARN_ON(!ringbuf != !ctx_obj))
+		return;
+	if (!i915_reset_in_progress(&ctx->i915->gpu_error))
+		WARN_ON(!ringbuf || !ctx_obj);
+	if (!ringbuf || !ctx_obj)
+		return;
+	if (WARN_ON(i915_gem_object_get_pages(ctx_obj)))
+		return;
 
-		if (i915_gem_object_get_pages(ctx_obj)) {
-			WARN(1, "Failed get_pages for context obj\n");
-			continue;
-		}
-		page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-		reg_state = kmap_atomic(page);
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	reg_state = kmap_atomic(page);
 
-		reg_state[CTX_RING_HEAD+1] = 0;
-		reg_state[CTX_RING_TAIL+1] = 0;
+	if (rezero)
+		ringbuf->tail = 0;
+	ringbuf->head = ringbuf->tail;
+	ringbuf->last_retired_head = -1;
+	intel_ring_update_space(ringbuf);
 
-		kunmap_atomic(reg_state);
+	reg_state[CTX_RING_HEAD+1] = ringbuf->head;
+	reg_state[CTX_RING_TAIL+1] = ringbuf->tail;
 
-		ringbuf->head = 0;
-		ringbuf->tail = 0;
-		ringbuf->last_retired_head = -1;
-		intel_ring_update_space(ringbuf);
+	kunmap_atomic(reg_state);
+}
+
+void intel_lr_context_reset(struct intel_context *ctx)
+{
+	struct drm_i915_private *dev_priv = ctx->i915;
+	struct intel_engine_cs *ring;
+	int i;
+
+	for_each_ring(ring, dev_priv, i) {
+		intel_lr_context_resync(ctx, ring, true);
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 8ed6c18..fa2f814 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -83,8 +83,10 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *ring);
 void intel_lr_context_unpin(struct drm_i915_gem_request *req);
-void intel_lr_context_reset(struct drm_device *dev,
-			struct intel_context *ctx);
+void intel_lr_context_resync(struct intel_context *ctx,
+			     struct intel_engine_cs *ring,
+			     bool rezero);
+void intel_lr_context_reset(struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
 
-- 
1.9.1


