[Intel-gfx] [PATCH 3/4] drm/i915: Add the CPU mapping of the hw context to the pinned items.

Nick Hoath <nicholas.hoath@intel.com>
Tue Oct 6 07:52:03 PDT 2015


Pin the CPU mapping of the hw context so that it is not mapped/unmapped
per batch buffer when doing GuC submission.
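
The mapping now follows the context pin lifetime instead of the
per-submission path. A minimal sketch of the intent, reusing the names
from this patch (illustrative only, not additional code to apply):

    /* at pin time: map the LRC state page once and cache the pointer */
    page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
    ctx->engine[ring->id].reg_state = kmap(page);
    ctx->engine[ring->id].page = page;

    /* per submission: update the cached mapping, no kmap_atomic() needed */
    reg_state = rq->ctx->engine[ring->id].reg_state;
    reg_state[CTX_RING_TAIL+1] = rq->tail;

    /* at context free: drop the long-lived mapping */
    kunmap(ctx->engine[ring->id].page);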

Issue: VIZ-4277
Cc: David Gordon <david.s.gordon@intel.com>
Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 14 ++++------
 drivers/gpu/drm/i915/i915_drv.h     |  4 ++-
 drivers/gpu/drm/i915/intel_lrc.c    | 56 +++++++++++++++++++++++++++----------
 3 files changed, 50 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3f2a7a7..e68cf5fa 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1970,10 +1970,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 static void i915_dump_lrc_obj(struct seq_file *m,
 			      struct intel_engine_cs *ring,
-			      struct drm_i915_gem_object *ctx_obj)
+			      struct drm_i915_gem_object *ctx_obj,
+			      uint32_t *reg_state)
 {
-	struct page *page;
-	uint32_t *reg_state;
 	int j;
 	unsigned long ggtt_offset = 0;
 
@@ -1996,17 +1995,13 @@ static void i915_dump_lrc_obj(struct seq_file *m,
 		return;
 	}
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	if (!WARN_ON(page == NULL)) {
-		reg_state = kmap_atomic(page);
-
+	if (!WARN_ON(reg_state == NULL)) {
 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
 			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
 				   ggtt_offset + 4096 + (j * 4),
 				   reg_state[j], reg_state[j + 1],
 				   reg_state[j + 2], reg_state[j + 3]);
 		}
-		kunmap_atomic(reg_state);
 	}
 
 	seq_putc(m, '\n');
@@ -2034,7 +2029,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 		for_each_ring(ring, dev_priv, i) {
 			if (ring->default_context != ctx)
 				i915_dump_lrc_obj(m, ring,
-						  ctx->engine[i].state);
+						  ctx->engine[i].state,
+						  ctx->engine[i].reg_state);
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d660ee3..b49fd12 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -879,8 +879,10 @@ struct intel_context {
 	} legacy_hw_ctx;
 
 	/* Execlists */
-	struct {
+	struct intel_context_engine {
 		struct drm_i915_gem_object *state;
+		uint32_t *reg_state;
+		struct page *page;
 		struct intel_ringbuffer *ringbuf;
 		int pin_count;
 	} engine[I915_NUM_RINGS];
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b807928..55a4de56 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -360,16 +360,13 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-	struct page *page;
-	uint32_t *reg_state;
+	uint32_t *reg_state = rq->ctx->engine[ring->id].reg_state;
 
 	BUG_ON(!ctx_obj);
+	WARN_ON(!reg_state);
 	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
 	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
 
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
-
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
@@ -385,8 +382,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 		ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 	}
 
-	kunmap_atomic(reg_state);
-
 	return 0;
 }
 
@@ -1004,7 +999,31 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
+static int intel_mmap_hw_context(struct drm_i915_gem_object *obj,
+		bool unmap)
+{
+	int ret = 0;
+	struct intel_context_engine *ice =
+			(struct intel_context_engine *)obj->mappable;
+	struct page *page;
+	uint32_t *reg_state;
+
+	if (unmap) {
+		kunmap(ice->page);
+		ice->reg_state = NULL;
+		ice->page = NULL;
+	} else {
+		page = i915_gem_object_get_page(obj, LRC_STATE_PN);
+		reg_state = kmap(page);
+		ice->reg_state = reg_state;
+		ice->page = page;
+	}
+	return ret;
+}
+
+static int intel_lr_context_do_pin(
+		struct intel_context *ctx,
+		struct intel_engine_cs *ring,
 		struct drm_i915_gem_object *ctx_obj,
 		struct intel_ringbuffer *ringbuf)
 {
@@ -1051,7 +1070,7 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-		ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+		ret = intel_lr_context_do_pin(rq->ctx, ring, ctx_obj, ringbuf);
 		if (ret)
 			goto reset_pin_count;
 	}
@@ -1915,6 +1934,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 {
+	struct page *page;
+	uint32_t *reg_state;
 	int ret;
 
 	/* Intentionally left blank. */
@@ -1939,6 +1960,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 
 	/* As this is the default context, always pin it */
 	ret = intel_lr_context_do_pin(
+			ring->default_context,
 			ring,
 			ring->default_context->engine[ring->id].state,
 			ring->default_context->engine[ring->id].ringbuf);
@@ -1949,6 +1971,13 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 		return ret;
 	}
 
+	page = i915_gem_object_get_page(
+			ring->default_context->engine[ring->id].state,
+			LRC_STATE_PN);
+	reg_state = kmap(page);
+	ring->default_context->engine[ring->id].reg_state = reg_state;
+	ring->default_context->engine[ring->id].page = page;
+
 	return ret;
 }
 
@@ -2388,6 +2417,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 			struct intel_engine_cs *ring = ringbuf->ring;
 
 			if (ctx == ring->default_context) {
+				kunmap(ctx->engine[ring->id].page);
 				i915_gem_object_ggtt_unpin(ringbuf->obj);
 				i915_gem_object_ggtt_unpin(ctx_obj);
 			}
@@ -2489,6 +2519,8 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 		goto error_ringbuf;
 	}
 
+	ctx_obj->mmap = intel_mmap_hw_context;
+	ctx_obj->mappable = &(ctx->engine[ring->id]);
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
@@ -2536,7 +2568,6 @@ void intel_lr_context_reset(struct drm_device *dev,
 		struct intel_ringbuffer *ringbuf =
 				ctx->engine[ring->id].ringbuf;
 		uint32_t *reg_state;
-		struct page *page;
 
 		if (!ctx_obj)
 			continue;
@@ -2545,14 +2576,11 @@ void intel_lr_context_reset(struct drm_device *dev,
 			WARN(1, "Failed get_pages for context obj\n");
 			continue;
 		}
-		page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-		reg_state = kmap_atomic(page);
+		reg_state = ctx->engine[ring->id].reg_state;
 
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-		kunmap_atomic(reg_state);
-
 		ringbuf->head = 0;
 		ringbuf->tail = 0;
 
-- 
1.9.1


