[Intel-gfx] [PATCH 4/7] drm/i915: Cache LRC state page in the context

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Mon Jan 11 06:08:38 PST 2016


From: Tvrtko Ursulin <tvrtko.ursulin at intel.com>

The LRC lifetime is well defined, so we can cache a pointer to the
page backing the context object's state in the context itself. This
avoids walking the object's SG page list from interrupt context,
where the big lock cannot be held.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h  |  1 +
 drivers/gpu/drm/i915/intel_lrc.c | 17 +++++++++++------
 2 files changed, 12 insertions(+), 6 deletions(-)
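
[Editorial note, not part of the commit: the pattern applied here is to do
the page list walk once at pin time, while struct_mutex is held, cache the
resulting struct page pointer in the per-engine context state, and have the
submission path (which may run in interrupt context) only dereference the
cached pointer. Below is a minimal, self-contained userspace sketch of that
pattern, not the i915 code itself; all names (obj_get_page, ctx_pin,
ctx_update_tail, reg_state indices) are hypothetical stand-ins.]

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	struct obj_page {
		struct obj_page *next;
		uint32_t reg_state[64];		/* stand-in for the LRC register state */
	};

	struct gem_object {
		pthread_mutex_t lock;		/* stand-in for struct_mutex */
		struct obj_page *pages;		/* stand-in for the SG page list */
	};

	struct context {
		struct gem_object *state;
		struct obj_page *lrc_state_page; /* cached at pin time */
	};

	/* Slow path: walk the page list; only done with the lock held. */
	static struct obj_page *obj_get_page(struct gem_object *obj, int n)
	{
		struct obj_page *p = obj->pages;

		while (p && n--)
			p = p->next;
		return p;
	}

	/* Pin time: take the lock, walk the list once, cache the result. */
	static int ctx_pin(struct context *ctx, int state_page_index)
	{
		pthread_mutex_lock(&ctx->state->lock);
		ctx->lrc_state_page = obj_get_page(ctx->state, state_page_index);
		pthread_mutex_unlock(&ctx->state->lock);

		return ctx->lrc_state_page ? 0 : -1;
	}

	/* Hot path (interrupt context in i915): no lock, no list walk. */
	static void ctx_update_tail(struct context *ctx, uint32_t tail)
	{
		ctx->lrc_state_page->reg_state[1] = tail;
	}

	int main(void)
	{
		struct obj_page page = { .next = NULL };
		struct gem_object obj = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.pages = &page,
		};
		struct context ctx = { .state = &obj };

		if (ctx_pin(&ctx, 0))
			return 1;

		ctx_update_tail(&ctx, 0x40);	/* lock-free hot path */
		printf("tail = 0x%x\n", ctx.lrc_state_page->reg_state[1]);
		return 0;
	}
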

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4b0932634cc9..377cdcb10ad5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -885,6 +885,7 @@ struct intel_context {
 		int pin_count;
 		u32 lrc_offset;
 		u64 lrc_desc;
+		struct page *lrc_state_page;
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 321674d5e1f9..02ef9413700d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -338,14 +338,10 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
 	struct intel_engine_cs *ring = rq->ring;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct page *page;
 	uint32_t *reg_state;
 
-	BUG_ON(!ctx_obj);
-
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
+	BUG_ON(!rq->ctx->engine[ring->id].lrc_state_page);
+	reg_state = kmap_atomic(rq->ctx->engine[ring->id].lrc_state_page);
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 	reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->gtt_offset;
@@ -1015,6 +1011,7 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct page *lrc_state_page;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
@@ -1024,6 +1021,12 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
+	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+	if (WARN_ON(!lrc_state_page)) {
+		ret = -ENODEV;
+		goto unpin_ctx_obj;
+	}
+
 	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 	if (ret)
 		goto unpin_ctx_obj;
@@ -1033,6 +1036,7 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
 	ctx->engine[ring->id].lrc_desc = ring->ctx_desc_template |
 					 ctx->engine[ring->id].lrc_offset |
 					 ((u64)intel_execlists_ctx_id(ctx, ring) << GEN8_CTX_ID_SHIFT);
+	ctx->engine[ring->id].lrc_state_page = lrc_state_page;
 	ctx_obj->dirty = true;
 
 	/* Invalidate GuC TLB. */
@@ -1077,6 +1081,7 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 			i915_gem_object_ggtt_unpin(ctx_obj);
 			rq->ctx->engine[ring->id].lrc_offset = 0;
 			rq->ctx->engine[ring->id].lrc_desc = 0;
+			rq->ctx->engine[ring->id].lrc_state_page = NULL;
 		}
 	}
 }
-- 
1.9.1
