[Intel-gfx] [PATCH] drm/i915/guc: Defer LRC context unpin or release

yu.dai at intel.com
Fri Nov 6 13:56:59 PST 2015


From: Alex Dai <yu.dai at intel.com>

The LRC context can't be freed (nor unpinned) immediately, even when
all of the requests referencing it have completed, because the HW
still needs a short period of time to save data to the LRC status
page. It is only safe to free an LRC after the HW has completed a
request from a different LRC.

Move the LRC to ring->last_unpin_ctx when its pin count reaches zero,
and take an extra ctx reference to make sure it won't be freed
immediately. When the HW completes the next request from a different
LRC, release the deferred one. If the deferred context is pinned
again first, simply drop the extra pin and reference.

Signed-off-by: Alex Dai <yu.dai at intel.com>
---
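Note, not part of the commit: below is a self-contained user-space
sketch of the pin-count/refcount choreography described above, for
tracing the life cycle outside the driver. struct ctx, struct engine
and the ctx_pin()/ctx_unpin()/release_last_ctx() helpers are made-up
stand-ins for the real i915 structures and functions, not driver API;
only the counting scheme mirrors the patch.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct intel_context / struct intel_engine_cs. */
struct ctx {
	int refcount;
	int pin_count;
	const char *name;
};

struct engine {
	struct ctx *last_unpin_ctx;	/* deferred, kept pinned + referenced */
};

static void ctx_put(struct ctx *ctx)
{
	if (--ctx->refcount == 0) {
		printf("%s freed\n", ctx->name);
		free(ctx);
	}
}

/* Finally release the context whose unpin was deferred earlier. */
static void release_last_ctx(struct engine *engine)
{
	struct ctx *ctx = engine->last_unpin_ctx;

	if (!ctx)
		return;

	ctx->pin_count--;		/* drop the artificial pin ... */
	assert(ctx->pin_count == 0);	/* ... mirrors the WARN_ON */
	ctx_put(ctx);			/* drop the artificial reference */
	engine->last_unpin_ctx = NULL;
}

/* Pin path: re-pinning the deferred context cancels the deferral. */
static void ctx_pin(struct engine *engine, struct ctx *ctx)
{
	ctx->pin_count++;

	if (engine->last_unpin_ctx == ctx) {
		ctx->pin_count--;	/* fold the artificial pin ... */
		ctx_put(ctx);		/* ... and reference into this pin */
		engine->last_unpin_ctx = NULL;
	}
}

/*
 * Unpin path: a request completing on a *different* context is the
 * signal that the HW is done writing the previously deferred one.
 */
static void ctx_unpin(struct engine *engine, struct ctx *ctx)
{
	if (engine->last_unpin_ctx != ctx)
		release_last_ctx(engine);

	if (--ctx->pin_count == 0) {
		/* HW may still write the context image: defer. */
		ctx->pin_count++;
		ctx->refcount++;
		engine->last_unpin_ctx = ctx;
	}
}

int main(void)
{
	struct engine engine = { NULL };
	struct ctx *a = calloc(1, sizeof(*a));
	struct ctx *b = calloc(1, sizeof(*b));

	a->refcount = b->refcount = 1;	/* our own references */
	a->name = "ctx A";
	b->name = "ctx B";

	ctx_pin(&engine, a);
	ctx_unpin(&engine, a);		/* pin count hits 0: A is deferred */
	ctx_put(a);			/* A survives on the deferred ref */

	ctx_pin(&engine, b);
	ctx_unpin(&engine, b);		/* different ctx: A freed, B deferred */

	ctx_put(b);
	release_last_ctx(&engine);	/* engine teardown frees B */
	return 0;
}

Running it prints "ctx A freed" only once a request from ctx B
completes, which is the deferral the commit message describes: the
deferred context is released by a completion from a different LRC, by
a re-pin, or at engine teardown.
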
 drivers/gpu/drm/i915/intel_lrc.c        | 53 +++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h |  1 +
 2 files changed, 46 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 06180dc..da01d72 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1051,6 +1051,14 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 		if (ret)
 			goto reset_pin_count;
 	}
+
+	/* If this is the LRC held from the last unpin, drop that pin and ref. */
+	if (ring->last_unpin_ctx == rq->ctx) {
+		rq->ctx->engine[ring->id].pin_count--;
+		i915_gem_context_unreference(rq->ctx);
+		ring->last_unpin_ctx = NULL;
+	}
+
 	return ret;
 
 reset_pin_count:
@@ -1058,18 +1066,45 @@ reset_pin_count:
 	return ret;
 }
 
+static void
+lrc_unpin_last_ctx(struct intel_engine_cs *ring)
+{
+	struct intel_context *ctx = ring->last_unpin_ctx;
+
+	if (!ctx)
+		return;
+
+	i915_gem_object_ggtt_unpin(ctx->engine[ring->id].state);
+	intel_unpin_ringbuffer_obj(ctx->engine[ring->id].ringbuf);
+
+	WARN_ON(--ctx->engine[ring->id].pin_count);
+	i915_gem_context_unreference(ctx);
+
+	ring->last_unpin_ctx = NULL;
+}
+
 void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
 	struct intel_engine_cs *ring = rq->ring;
-	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
-	if (ctx_obj) {
-		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--rq->ctx->engine[ring->id].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-		}
+	if (!rq->ctx->engine[ring->id].state)
+		return;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	/* HW completed a request from a different LRC; unpin the last one. */
+	if (ring->last_unpin_ctx != rq->ctx)
+		lrc_unpin_last_ctx(ring);
+
+	if (--rq->ctx->engine[ring->id].pin_count == 0) {
+		/* The last one should be unpinned already */
+		WARN_ON(ring->last_unpin_ctx);
+
+		/* Keep the context pinned and referenced for now */
+		rq->ctx->engine[ring->id].pin_count++;
+		i915_gem_context_reference(rq->ctx);
+
+		ring->last_unpin_ctx = rq->ctx;
 	}
 }
 
@@ -1908,6 +1944,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 	}
 
 	lrc_destroy_wa_ctx_obj(ring);
+
+	lrc_unpin_last_ctx(ring);
 }
 
 static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 58b1976..676d27f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -267,6 +267,7 @@ struct  intel_engine_cs {
 	spinlock_t execlist_lock;
 	struct list_head execlist_queue;
 	struct list_head execlist_retired_req_list;
+	struct intel_context *last_unpin_ctx;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
 	int		(*emit_request)(struct drm_i915_gem_request *request);
-- 
2.5.0


