[Intel-gfx] [PATCH 1/2] drm/i915: Convert execlists_submit_contexts() for requests

Mika Kuoppala mika.kuoppala at linux.intel.com
Mon Jul 6 08:35:33 PDT 2015


Make execlists_submit_contexts() accept requests instead of contexts
with tails, and rename it to execlists_submit_requests() to better
reflect its purpose. Keep passing requests as first-class objects all
the way down to where the ELSPs are written.

The benefit is that with requests in hand we can adjust the request
state right where the actual hardware interaction happens. The author
also expects gains in debuggability and more leverage over context
descriptor creation.
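
For illustration, here is a minimal standalone sketch of the calling
convention this patch moves to. The types below (struct request,
struct engine, struct context) are simplified stand-ins, not the real
i915 structures; the point is only that the submit path derives the
engine, the per-engine context state and the ring buffer from the
request itself instead of taking them as separate parameters:

/* Simplified stand-in types; the real driver uses
 * struct drm_i915_gem_request, struct intel_engine_cs,
 * struct intel_context and struct intel_ringbuffer. */
#include <stdint.h>
#include <stdio.h>

struct engine { int id; const char *name; };
struct ctx_state { uint64_t lrca; };            /* per-engine context image */
struct context { struct ctx_state *engine_state[2]; };
struct ringbuf { uint32_t tail; };

struct request {
	struct engine  *ring;      /* engine the request executes on */
	struct context *ctx;       /* its logical ring context */
	struct ringbuf *ringbuf;   /* ring buffer the request was emitted to */
	uint32_t        tail;      /* ring tail at submission time */
};

/* Everything needed for submission is reachable from the request. */
static uint64_t ctx_descriptor(const struct request *rq)
{
	const struct ctx_state *state = rq->ctx->engine_state[rq->ring->id];

	/* The real code also ORs in descriptor flags and a context id. */
	return state->lrca;
}

static void submit_requests(const struct request *rq0, const struct request *rq1)
{
	uint64_t desc1 = rq1 ? ctx_descriptor(rq1) : 0;  /* second port may be idle */
	uint64_t desc0 = ctx_descriptor(rq0);

	printf("%s ELSP <- %#llx %#llx\n", rq0->ring->name,
	       (unsigned long long)desc1, (unsigned long long)desc0);
}

int main(void)
{
	struct engine rcs = { .id = 0, .name = "rcs" };
	struct ctx_state state = { .lrca = 0x1000 };
	struct context ctx = { .engine_state = { &state, NULL } };
	struct ringbuf rb = { .tail = 0 };
	struct request rq = { .ring = &rcs, .ctx = &ctx, .ringbuf = &rb, .tail = 64 };

	submit_requests(&rq, NULL);
	return 0;
}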

Cc: Chris Wilson <chris at chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala at intel.com>
Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c  |   8 ++-
 drivers/gpu/drm/i915/intel_lrc.c | 103 +++++++++++++++++----------------------
 drivers/gpu/drm/i915/intel_lrc.h |   3 +-
 3 files changed, 49 insertions(+), 65 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 49016e0..927964b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2620,10 +2620,8 @@ void i915_gem_request_free(struct kref *req_ref)
 
 	if (ctx) {
 		if (i915.enable_execlists) {
-			struct intel_engine_cs *ring = req->ring;
-
-			if (ctx != ring->default_context)
-				intel_lr_context_unpin(ring, ctx);
+			if (ctx != req->ring->default_context)
+				intel_lr_context_unpin(req);
 		}
 
 		i915_gem_context_unreference(ctx);
@@ -2765,7 +2763,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 		list_del(&submit_req->execlist_link);
 
 		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(ring, submit_req->ctx);
+			intel_lr_context_unpin(submit_req);
 
 		i915_gem_request_unreference(submit_req);
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5534f87..74601fa 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -211,8 +211,7 @@ enum {
 #define GEN8_CTX_ID_SHIFT 32
 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -262,10 +261,11 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 	return lrca >> 12;
 }
 
-static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *ctx_obj)
+static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
 	uint64_t desc;
 	uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
 
@@ -293,24 +293,25 @@ static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
 	return desc;
 }
 
-static void execlists_elsp_write(struct intel_engine_cs *ring,
-				 struct drm_i915_gem_object *ctx_obj0,
-				 struct drm_i915_gem_object *ctx_obj1)
+static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
+				 struct drm_i915_gem_request *rq1)
 {
+
+	struct intel_engine_cs *ring = rq0->ring;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint64_t temp = 0;
 	uint32_t desc[4];
 
 	/* XXX: You must always write both descriptors in the order below. */
-	if (ctx_obj1)
-		temp = execlists_ctx_descriptor(ring, ctx_obj1);
+	if (rq1)
+		temp = execlists_ctx_descriptor(rq1);
 	else
 		temp = 0;
 	desc[1] = (u32)(temp >> 32);
 	desc[0] = (u32)temp;
 
-	temp = execlists_ctx_descriptor(ring, ctx_obj0);
+	temp = execlists_ctx_descriptor(rq0);
 	desc[3] = (u32)(temp >> 32);
 	desc[2] = (u32)temp;
 
@@ -329,19 +330,24 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	spin_unlock(&dev_priv->uncore.lock);
 }
 
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
-				    struct drm_i915_gem_object *ring_obj,
-				    struct i915_hw_ppgtt *ppgtt,
-				    u32 tail)
+static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
+	struct intel_engine_cs *ring = rq->ring;
+	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
 	struct page *page;
 	uint32_t *reg_state;
 
+	BUG_ON(!ctx_obj);
+	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
+	WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
+
 	page = i915_gem_object_get_page(ctx_obj, 1);
 	reg_state = kmap_atomic(page);
 
-	reg_state[CTX_RING_TAIL+1] = tail;
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
 	/* True PPGTT with dynamic page allocation: update PDP registers and
 	 * point the unallocated PDPs to the scratch page
@@ -358,32 +364,15 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
 	return 0;
 }
 
-static void execlists_submit_contexts(struct intel_engine_cs *ring,
-				      struct intel_context *to0, u32 tail0,
-				      struct intel_context *to1, u32 tail1)
+static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
+				      struct drm_i915_gem_request *rq1)
 {
-	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
-	struct drm_i915_gem_object *ctx_obj1 = NULL;
-	struct intel_ringbuffer *ringbuf1 = NULL;
-
-	BUG_ON(!ctx_obj0);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
-	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
-
-	execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
-
-	if (to1) {
-		ringbuf1 = to1->engine[ring->id].ringbuf;
-		ctx_obj1 = to1->engine[ring->id].state;
-		BUG_ON(!ctx_obj1);
-		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
-		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
+	execlists_update_context(rq0);
 
-		execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
-	}
+	if (rq1)
+		execlists_update_context(rq1);
 
-	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
+	execlists_elsp_write(rq0, rq1);
 }
 
 static void execlists_context_unqueue(struct intel_engine_cs *ring)
@@ -443,9 +432,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 
 	WARN_ON(req1 && req1->elsp_submitted);
 
-	execlists_submit_contexts(ring, req0->ctx, req0->tail,
-				  req1 ? req1->ctx : NULL,
-				  req1 ? req1->tail : 0);
+	execlists_submit_requests(req0, req1);
 
 	req0->elsp_submitted++;
 	if (req1)
@@ -549,7 +536,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 	int num_elements = 0;
 
 	if (request->ctx != ring->default_context)
-		intel_lr_context_pin(ring, request->ctx);
+		intel_lr_context_pin(request);
 
 	i915_gem_request_reference(request);
 
@@ -641,14 +628,14 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 {
 	int ret;
 
+	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+
 	if (request->ctx != request->ring->default_context) {
-		ret = intel_lr_context_pin(request->ring, request->ctx);
+		ret = intel_lr_context_pin(request);
 		if (ret)
 			return ret;
 	}
 
-	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
-
 	return 0;
 }
 
@@ -958,7 +945,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 				ctx->engine[ring->id].state;
 
 		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(ring, ctx);
+			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
 	}
@@ -1002,15 +989,15 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (ctx->engine[ring->id].pin_count++ == 0) {
+	if (rq->ctx->engine[ring->id].pin_count++ == 0) {
 		ret = i915_gem_obj_ggtt_pin(ctx_obj,
 				GEN8_LR_CONTEXT_ALIGN, 0);
 		if (ret)
@@ -1026,20 +1013,20 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
 reset_pin_count:
-	ctx->engine[ring->id].pin_count = 0;
+	rq->ctx->engine[ring->id].pin_count = 0;
 
 	return ret;
 }
 
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	struct intel_engine_cs *ring = rq->ring;
+	struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--ctx->engine[ring->id].pin_count == 0) {
+		if (--rq->ctx->engine[ring->id].pin_count == 0) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 		}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index d3dd3ac..e0299fb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -70,8 +70,7 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
+void intel_lr_context_unpin(struct drm_i915_gem_request *req);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);
 
-- 
2.1.4


