[Intel-gfx] [PATCH 07/15] drm/i915: Expose two LRC functions for GuC submission mode

Dave Gordon <david.s.gordon at intel.com>
Fri Jul 3 05:30:29 PDT 2015


GuC submission is basically execlist submission, but with the GuC
handling the actual writes to the ELSP and the resulting context
switch interrupts. So to prepare a context for submission via the
GuC, we need some of the same functions used in execlist mode.
This commit exposes two such functions, changing their names to
better describe what they do (they're related to logical ring
contexts rather than to execlists per se).
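
For illustration only (not part of this patch): a GuC submission path
might use the two exposed functions roughly as sketched below. The
guc_submit_sketch() helper is hypothetical; only
intel_lr_context_update() and intel_lr_context_descriptor() come from
this patch.

    /*
     * Hypothetical sketch of a GuC submission path using the two
     * functions exposed by this patch. guc_submit_sketch() itself is a
     * placeholder; only intel_lr_context_update() and
     * intel_lr_context_descriptor() are real.
     */
    static void guc_submit_sketch(struct intel_engine_cs *ring,
                                  struct intel_context *ctx, u32 tail)
    {
            struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
            struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
            uint64_t desc;

            /* Write the new tail (and ring buffer start) into the saved
             * context image, just as execlist mode does before an ELSP
             * write. */
            intel_lr_context_update(ctx_obj, ringbuf->obj, tail);

            /* The GuC identifies the context by the same descriptor that
             * execlist mode would have written to the ELSP. */
            desc = intel_lr_context_descriptor(ring, ctx_obj);

            /* ... hand 'desc' to the GuC firmware interface here ... */
            (void)desc;
    }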

v2:
    Replaces previous "drm/i915: Move execlists defines from .c to .h"

v3:
    Incorporates a change to one of the functions exposed here; it was
    previously part of an internal patch but was omitted from the version
    recently committed to drm-intel-nightly:
	7a01a0a drm/i915/lrc: Update PDPx registers with lri commands
    So we reinstate this change here.

Issue: VIZ-4884
Signed-off-by: Dave Gordon <david.s.gordon at intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 37 +++++++++++++------------------------
 drivers/gpu/drm/i915/intel_lrc.h |  5 +++++
 2 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b928ab..164c63a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -262,8 +262,8 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 	return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *ctx_obj)
+uint64_t intel_lr_context_descriptor(struct intel_engine_cs *ring,
+				     struct drm_i915_gem_object *ctx_obj)
 {
 	struct drm_device *dev = ring->dev;
 	uint64_t desc;
@@ -304,13 +304,13 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 
 	/* XXX: You must always write both descriptors in the order below. */
 	if (ctx_obj1)
-		temp = execlists_ctx_descriptor(ring, ctx_obj1);
+		temp = intel_lr_context_descriptor(ring, ctx_obj1);
 	else
 		temp = 0;
 	desc[1] = (u32)(temp >> 32);
 	desc[0] = (u32)temp;
 
-	temp = execlists_ctx_descriptor(ring, ctx_obj0);
+	temp = intel_lr_context_descriptor(ring, ctx_obj0);
 	desc[3] = (u32)(temp >> 32);
 	desc[2] = (u32)temp;
 
@@ -329,10 +329,10 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	spin_unlock(&dev_priv->uncore.lock);
 }
 
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
-				    struct drm_i915_gem_object *ring_obj,
-				    struct i915_hw_ppgtt *ppgtt,
-				    u32 tail)
+/* Update the ringbuffer pointer and tail offset in a saved context image */
+void intel_lr_context_update(struct drm_i915_gem_object *ctx_obj,
+			     struct drm_i915_gem_object *ring_obj,
+			     u32 tail)
 {
 	struct page *page;
 	uint32_t *reg_state;
@@ -340,22 +340,11 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
 	page = i915_gem_object_get_page(ctx_obj, 1);
 	reg_state = kmap_atomic(page);
 
-	reg_state[CTX_RING_TAIL+1] = tail;
 	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
-
-	/* True PPGTT with dynamic page allocation: update PDP registers and
-	 * point the unallocated PDPs to the scratch page
-	 */
-	if (ppgtt) {
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
-		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-	}
+	if (tail != ~0u)
+		reg_state[CTX_RING_TAIL+1] = tail;
 
 	kunmap_atomic(reg_state);
-
-	return 0;
 }
 
 static void execlists_submit_contexts(struct intel_engine_cs *ring,
@@ -371,7 +360,7 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
 	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
 	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
 
-	execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
+	intel_lr_context_update(ctx_obj0, ringbuf0->obj, tail0);
 
 	if (to1) {
 		ringbuf1 = to1->engine[ring->id].ringbuf;
@@ -380,7 +369,7 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
 		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
 		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
 
-		execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
+		intel_lr_context_update(ctx_obj1, ringbuf1->obj, tail1);
 	}
 
 	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@@ -2032,7 +2021,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	reg_state[CTX_RING_TAIL+1] = 0;
 	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
 	/* Ring buffer start address is not known until the buffer is pinned.
-	 * It is written to the context image in execlists_update_context()
+	 * It is written to the context image in intel_lr_context_update()
 	 */
 	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
 	reg_state[CTX_RING_BUFFER_CONTROL+1] =
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f59940a..b3659a1 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -73,6 +73,11 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
 		struct intel_context *ctx);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);
+void intel_lr_context_update(struct drm_i915_gem_object *ctx_obj,
+			     struct drm_i915_gem_object *ring_obj,
+			     u32 tail);
+uint64_t intel_lr_context_descriptor(struct intel_engine_cs *ring,
+				     struct drm_i915_gem_object *ctx_obj);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
-- 
1.9.1


