[Intel-gfx] [PATCH v1] drm/i915: Defer LRC unpin and release

yu.dai at intel.com yu.dai at intel.com
Thu Nov 19 16:52:05 PST 2015


From: Alex Dai <yu.dai at intel.com>

The LRC can't be freed (nor even unpinned) immediately after all of
its referenced requests have completed, because the HW still needs a
short period of time to save data to the LRC status page. It is safe
to free an LRC only once the HW has completed a request from a
different LRC.

Introduce a new function, intel_lr_context_do_unpin, that does the
actual unpin work. When the driver receives an unpin call (from the
retiring of a request), the LRC pin & ref counts are bumped to defer
the unpin and release. Later, if the previous last LRC is different
from the current one and its pincount has dropped to zero, the driver
does the actual unpin.

There will always be one LRC kept pinned until the ring itself gets
cleaned up.
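
For illustration, below is a minimal standalone userspace model of the
deferred-unpin scheme (a sketch only; toy_ctx, set_last, retire_request
and do_unpin are made-up names for this example and are not part of the
patch or the driver):

/* Standalone model of the deferred unpin described above. All names
 * here are illustrative and are not the driver's API. */
#include <stdio.h>

struct toy_ctx {
	const char *name;
	int pin_count;		/* models ctx->engine[ring->id].pin_count */
	int refcount;		/* models the context reference count */
};

static struct toy_ctx *last_context;	/* models ring->last_context */

static void do_unpin(struct toy_ctx *ctx)
{
	/* Stands in for intel_lr_context_do_unpin() */
	printf("actually unpinning %s\n", ctx->name);
}

/* Models set_last_lrc(): keep the incoming context pinned/referenced
 * and only unpin the previous one once a request from a different
 * context has retired (or on ring cleanup, when ctx == NULL). */
static void set_last(struct toy_ctx *ctx)
{
	struct toy_ctx *last = last_context;

	if (ctx) {
		ctx->pin_count++;
		ctx->refcount++;
	}
	last_context = ctx;

	if (!last)
		return;

	last->pin_count--;
	if (last != ctx && last->pin_count == 0)
		do_unpin(last);
	last->refcount--;
}

/* Models intel_lr_context_unpin(): drop the request's pin, then defer
 * the real unpin via set_last(). */
static void retire_request(struct toy_ctx *ctx)
{
	ctx->pin_count--;
	set_last(ctx);
}

int main(void)
{
	struct toy_ctx a = { "ctx A", 1, 1 }, b = { "ctx B", 1, 1 };

	retire_request(&a);	/* A stays held as last_context */
	retire_request(&b);	/* A is unpinned only now; B is held */
	set_last(NULL);		/* ring cleanup releases B */
	return 0;
}

Compiled and run, this prints the unpin of ctx A only after a request
from ctx B retires, and the unpin of ctx B only at cleanup, which is
the ordering the patch relies on.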

v1: Simplify the update of the last context by reusing the existing
ring->last_context. Note that this is safe because the lrc rings are
cleaned up earlier than i915_gem_context_fini().

Signed-off-by: Alex Dai <yu.dai at intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 59 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 54 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 06180dc..7a3c9cc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1039,6 +1039,55 @@ unpin_ctx_obj:
 	return ret;
 }
 
+static void intel_lr_context_do_unpin(struct intel_engine_cs *ring,
+		struct intel_context *ctx)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *ctx_obj;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	ctx_obj = ctx->engine[ring->id].state;
+	if (!ctx_obj)
+		return;
+
+	i915_gem_object_ggtt_unpin(ctx_obj);
+	intel_unpin_ringbuffer_obj(ctx->engine[ring->id].ringbuf);
+
+	/* Invalidate GuC TLB. */
+	if (i915.enable_guc_submission)
+		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void set_last_lrc(struct intel_engine_cs *ring,
+		struct intel_context *ctx)
+{
+	struct intel_context *last;
+
+	/* Unpin, and hence release, of the lrc is deferred. Hold the pin &
+	 * ref counts until the retirement of the next request. */
+	if (ctx) {
+		ctx->engine[ring->id].pin_count++;
+		i915_gem_context_reference(ctx);
+	}
+
+	last = ring->last_context;
+	ring->last_context = ctx;
+
+	if (last == NULL)
+		return;
+
+	/* Unpin of the last context is on hold. Release its pincount first;
+	 * then, if HW completed a request from another lrc, do the actual unpin. */
+	last->engine[ring->id].pin_count--;
+	if (last != ctx && !last->engine[ring->id].pin_count)
+		intel_lr_context_do_unpin(ring, last);
+
+	/* Release the refcount held on the previous context */
+	i915_gem_context_unreference(last);
+}
+
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
 	int ret = 0;
@@ -1062,14 +1111,11 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
 	struct intel_engine_cs *ring = rq->ring;
 	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-		}
+		--rq->ctx->engine[ring->id].pin_count;
+		set_last_lrc(ring, rq->ctx);
 	}
 }
 
@@ -1908,6 +1954,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 	}
 
 	lrc_destroy_wa_ctx_obj(ring);
+
+	/* This will clean up the last held lrc */
+	set_last_lrc(ring, NULL);
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
-- 
2.5.0


