[Intel-gfx] [RFC 05/39] drm/i915: Split i915_gem_do_execbuffer() in half

John.C.Harrison at Intel.com
Fri Jul 17 07:33:14 PDT 2015


From: John Harrison <John.C.Harrison at Intel.com>

Split the execbuffer() function in half. The first half collects and
validates all the information required to process the batch buffer. It also
does all the object pinning, relocations, active list management, etc. -
basically anything that must be done up front before the IOCTL returns and
allows the userland side to start changing/freeing things. The second half
does the actual ring submission.

This change implements the split but, for now, leaves the back half called
directly from the end of the front half, as sketched below.
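
As an illustration, here is a minimal, standalone C sketch of the pattern
this patch introduces (this is not i915 code and all names below are made
up): the front half validates the IOCTL arguments and snapshots everything
the submission needs into a params structure, while the back half consumes
only that snapshot. As in this patch, the back half is still called
synchronously from the front half; a later scheduler would take ownership
of the params and run the back half asynchronously.

#include <stdio.h>
#include <stdlib.h>

struct user_args {                  /* stand-in for drm_i915_gem_execbuffer2 */
        unsigned int flags;
        unsigned int batch_len;
        unsigned int batch_start_offset;
};

struct exec_params {                /* stand-in for i915_execbuffer_params */
        unsigned int args_flags;
        unsigned int args_batch_len;
        unsigned int args_batch_start_offset;
};

/* Back half: no access to user_args, uses only the saved copy. */
static int submission_final(const struct exec_params *params)
{
        printf("submit: len=%u offset=%u flags=%#x\n",
               params->args_batch_len,
               params->args_batch_start_offset,
               params->args_flags);
        return 0;
}

/* Front half: validate, snapshot, then hand off. */
static int do_execbuffer(const struct user_args *args)
{
        struct exec_params *params;
        int ret;

        if (args->batch_len == 0)       /* all validation happens up front */
                return -1;

        params = calloc(1, sizeof(*params));
        if (!params)
                return -1;

        params->args_flags              = args->flags;
        params->args_batch_len          = args->batch_len;
        params->args_batch_start_offset = args->batch_start_offset;

        /* For now the back half runs synchronously ... */
        ret = submission_final(params);

        /* ... and its saved state is freed here (a scheduler would own it). */
        free(params);
        return ret;
}

int main(void)
{
        struct user_args args = { .flags = 0x1, .batch_len = 4096,
                                  .batch_start_offset = 0 };
        return do_execbuffer(&args);
}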

Change-Id: I5e1c77639ce526ab2401b0323186c518bf13da0a
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  11 +++
 drivers/gpu/drm/i915/i915_gem.c            |   2 +
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130 ++++++++++++++++++++---------
 drivers/gpu/drm/i915/intel_lrc.c           |  58 +++++++++----
 drivers/gpu/drm/i915/intel_lrc.h           |   1 +
 5 files changed, 147 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 289ddd6..28d51ac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1684,10 +1684,18 @@ struct i915_execbuffer_params {
 	struct drm_device               *dev;
 	struct drm_file                 *file;
 	uint32_t                        dispatch_flags;
+	uint32_t                        args_flags;
 	uint32_t                        args_batch_start_offset;
+	uint32_t                        args_batch_len;
+	uint32_t                        args_num_cliprects;
+	uint32_t                        args_DR1;
+	uint32_t                        args_DR4;
 	uint32_t                        batch_obj_vm_offset;
 	struct intel_engine_cs          *ring;
 	struct drm_i915_gem_object      *batch_obj;
+	struct drm_clip_rect            *cliprects;
+	uint32_t                        instp_mask;
+	int                             instp_mode;
 	struct intel_context            *ctx;
 	struct drm_i915_gem_request     *request;
 };
@@ -1929,6 +1937,7 @@ struct drm_i915_private {
 		int (*execbuf_submit)(struct i915_execbuffer_params *params,
 				      struct drm_i915_gem_execbuffer2 *args,
 				      struct list_head *vmas);
+		int (*execbuf_final)(struct i915_execbuffer_params *params);
 		int (*init_rings)(struct drm_device *dev);
 		void (*cleanup_ring)(struct intel_engine_cs *ring);
 		void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2743,9 +2752,11 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 					struct drm_i915_gem_request *req);
 void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+void i915_gem_execbuff_release_batch_obj(struct drm_i915_gem_object *batch_obj);
 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 				   struct drm_i915_gem_execbuffer2 *args,
 				   struct list_head *vmas);
+int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8150820..2a5667b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5481,11 +5481,13 @@ int i915_gem_init(struct drm_device *dev)
 
 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
+		dev_priv->gt.execbuf_final = i915_gem_ringbuffer_submission_final;
 		dev_priv->gt.init_rings = i915_gem_init_rings;
 		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
 		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
 	} else {
 		dev_priv->gt.execbuf_submit = intel_execlists_submission;
+		dev_priv->gt.execbuf_final = intel_execlists_submission_final;
 		dev_priv->gt.init_rings = intel_logical_rings_init;
 		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
 		dev_priv->gt.stop_ring = intel_logical_ring_stop;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 988ecd4..ba9d595 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1198,14 +1198,10 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas)
 {
-	struct drm_clip_rect *cliprects = NULL;
 	struct drm_device *dev = params->dev;
 	struct intel_engine_cs *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 exec_start, exec_len;
-	int instp_mode;
-	u32 instp_mask;
-	int i, ret = 0;
+	int ret = 0;
 
 	if (args->num_cliprects != 0) {
 		if (ring != &dev_priv->ring[RCS]) {
@@ -1218,23 +1214,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			return -EINVAL;
 		}
 
-		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+		if (args->num_cliprects > UINT_MAX / sizeof(*params->cliprects)) {
 			DRM_DEBUG("execbuf with %u cliprects\n",
 				  args->num_cliprects);
 			return -EINVAL;
 		}
 
-		cliprects = kcalloc(args->num_cliprects,
-				    sizeof(*cliprects),
+		params->cliprects = kcalloc(args->num_cliprects,
+				    sizeof(*params->cliprects),
 				    GFP_KERNEL);
-		if (cliprects == NULL) {
+		if (params->cliprects == NULL) {
 			ret = -ENOMEM;
 			goto error;
 		}
 
-		if (copy_from_user(cliprects,
+		if (copy_from_user(params->cliprects,
 				   to_user_ptr(args->cliprects_ptr),
-				   sizeof(*cliprects)*args->num_cliprects)) {
+				   sizeof(*params->cliprects)*args->num_cliprects)) {
 			ret = -EFAULT;
 			goto error;
 		}
@@ -1250,19 +1246,19 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		}
 	}
 
-	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	instp_mask = I915_EXEC_CONSTANTS_MASK;
-	switch (instp_mode) {
+	params->instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	params->instp_mask = I915_EXEC_CONSTANTS_MASK;
+	switch (params->instp_mode) {
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (params->instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			ret = -EINVAL;
 			goto error;
 		}
 
-		if (instp_mode != dev_priv->relative_constants_mode) {
+		if (params->instp_mode != dev_priv->relative_constants_mode) {
 			if (INTEL_INFO(dev)->gen < 4) {
 				DRM_DEBUG("no rel constants on pre-gen4\n");
 				ret = -EINVAL;
@@ -1270,7 +1266,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			}
 
 			if (INTEL_INFO(dev)->gen > 5 &&
-			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+			    params->instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				ret = -EINVAL;
 				goto error;
@@ -1278,11 +1274,11 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 			/* The HW changed the meaning on this bit on gen6 */
 			if (INTEL_INFO(dev)->gen >= 6)
-				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+				params->instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
 		}
 		break;
 	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+		DRM_DEBUG("execbuf with unknown constants: %d\n", params->instp_mode);
 		ret = -EINVAL;
 		goto error;
 	}
@@ -1293,7 +1289,38 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
 
-	/* To be split into two functions here... */
+	ret = dev_priv->gt.execbuf_final(params);
+	if (ret)
+		goto error;
+
+	/*
+	 * Free everything that was stored in the QE structure (until the
+	 * scheduler arrives and does it instead):
+	 */
+	kfree(params->cliprects);
+	if (params->dispatch_flags & I915_DISPATCH_SECURE)
+		i915_gem_execbuff_release_batch_obj(params->batch_obj);
+
+	return 0;
+
+error:
+	kfree(params->cliprects);
+	return ret;
+}
+
+/*
+ * This is the main function for adding a batch to the ring.
+ * It is called from the scheduler, with the struct_mutex already held.
+ */
+int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
+{
+	struct drm_i915_private *dev_priv = params->dev->dev_private;
+	struct intel_engine_cs  *ring = params->ring;
+	u64 exec_start, exec_len;
+	int ret, i;
+
+	/* The mutex must be acquired before calling this function */
+	BUG_ON(!mutex_is_locked(&params->dev->struct_mutex));
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -1314,7 +1341,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	     "%s didn't clear reload\n", ring->name);
 
 	if (ring == &dev_priv->ring[RCS] &&
-			instp_mode != dev_priv->relative_constants_mode) {
+			params->instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			goto error;
@@ -1322,26 +1349,26 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		intel_ring_emit(ring, MI_NOOP);
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 		intel_ring_emit(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_emit(ring, params->instp_mask << 16 | params->instp_mode);
 		intel_ring_advance(ring);
 
-		dev_priv->relative_constants_mode = instp_mode;
+		dev_priv->relative_constants_mode = params->instp_mode;
 	}
 
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, params->request);
+	if (params->args_flags & I915_EXEC_GEN7_SOL_RESET) {
+		ret = i915_reset_gen7_sol_offsets(params->dev, params->request);
 		if (ret)
 			goto error;
 	}
 
-	exec_len   = args->batch_len;
+	exec_len   = params->args_batch_len;
 	exec_start = params->batch_obj_vm_offset +
 		     params->args_batch_start_offset;
 
-	if (cliprects) {
-		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(params->request, &cliprects[i],
-					    args->DR1, args->DR4);
+	if (params->cliprects) {
+		for (i = 0; i < params->args_num_cliprects; i++) {
+			ret = i915_emit_box(params->request, &params->cliprects[i],
+					    params->args_DR1, params->args_DR4);
 			if (ret)
 				goto error;
 
@@ -1370,7 +1397,6 @@ error:
 	 */
 	intel_runtime_pm_put(dev_priv);
 
-	kfree(cliprects);
 	return ret;
 }
 
@@ -1722,6 +1748,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->file                    = file;
 	params->ring                    = ring;
 	params->dispatch_flags          = dispatch_flags;
+	params->args_flags              = args->flags;
+	params->args_batch_len          = args->batch_len;
+	params->args_num_cliprects      = args->num_cliprects;
+	params->args_DR1                = args->DR1;
+	params->args_DR4                = args->DR4;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
 
@@ -1747,19 +1778,31 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+	if (ret)
+		goto err;
+
+	/* the request owns the ref now */
+	i915_gem_context_unreference(ctx);
 
-err_batch_unpin:
 	/*
-	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
-	 * batch vma for correctness. For less ugly and less fragility this
-	 * needs to be adjusted to also track the ggtt batch vma properly as
-	 * active.
+	 * The eb list is no longer required. The scheduler has extracted all
+	 * the information that needs to persist.
+	 */
+	eb_destroy(eb);
+
+	/*
+	 * Don't clean up everything that is now saved away in the queue.
+	 * Just unlock and return immediately.
 	 */
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+
+err_batch_unpin:
 	if (dispatch_flags & I915_DISPATCH_SECURE)
-		i915_gem_object_ggtt_unpin(batch_obj);
+		i915_gem_execbuff_release_batch_obj(batch_obj);
 
 err:
-	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
@@ -1782,6 +1825,17 @@ pre_mutex_err:
 	return ret;
 }
 
+void i915_gem_execbuff_release_batch_obj(struct drm_i915_gem_object *batch_obj)
+{
+	/*
+	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
+	 * batch vma for correctness. For less ugly and less fragility this
+	 * needs to be adjusted to also track the ggtt batch vma properly as
+	 * active.
+	 */
+	i915_gem_object_ggtt_unpin(batch_obj);
+}
+
 /*
  * Legacy execbuffer just creates an exec2 list from the original exec object
  * list array and passes it to the real function.
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 89f3bcd..bba1152 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -830,35 +830,31 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	struct drm_device       *dev = params->dev;
 	struct intel_engine_cs  *ring = params->ring;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
-	u64 exec_start;
-	int instp_mode;
-	u32 instp_mask;
 	int ret;
 
-	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	instp_mask = I915_EXEC_CONSTANTS_MASK;
-	switch (instp_mode) {
+	params->instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	params->instp_mask = I915_EXEC_CONSTANTS_MASK;
+	switch (params->instp_mode) {
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (params->instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
 
-		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+		if (params->instp_mode != dev_priv->relative_constants_mode) {
+			if (params->instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				return -EINVAL;
 			}
 
 			/* The HW changed the meaning on this bit on gen6 */
-			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+			params->instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
 		}
 		break;
 	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+		DRM_DEBUG("execbuf with unknown constants: %d\n", params->instp_mode);
 		return -EINVAL;
 	}
 
@@ -888,7 +884,33 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
 
-	/* To be split into two functions here... */
+	ret = dev_priv->gt.execbuf_final(params);
+	if (ret)
+		return ret;
+
+	/*
+	 * Free everything that was stored in the QE structure (until the
+	 * scheduler arrives and does it instead):
+	 */
+	if (params->dispatch_flags & I915_DISPATCH_SECURE)
+		i915_gem_execbuff_release_batch_obj(params->batch_obj);
+
+	return 0;
+}
+
+/*
+ * This is the main function for adding a batch to the ring.
+ * It is called from the scheduler, with the struct_mutex already held.
+ */
+int intel_execlists_submission_final(struct i915_execbuffer_params *params)
+{
+	struct drm_i915_private *dev_priv = params->dev->dev_private;
+	struct intel_engine_cs  *ring = params->ring;
+	u64 exec_start;
+	int ret;
+
+	/* The mutex must be acquired before calling this function */
+	BUG_ON(!mutex_is_locked(&params->dev->struct_mutex));
 
 	/*
 	 * Unconditionally invalidate gpu caches and ensure that we do flush
@@ -899,7 +921,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		return ret;
 
 	if (ring == &dev_priv->ring[RCS] &&
-	    instp_mode != dev_priv->relative_constants_mode) {
+	    params->instp_mode != dev_priv->relative_constants_mode) {
+		struct intel_ringbuffer *ringbuf = params->request->ringbuf;
+
 		ret = intel_logical_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
@@ -907,14 +931,14 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		intel_logical_ring_emit(ringbuf, MI_NOOP);
 		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
 		intel_logical_ring_emit(ringbuf, INSTPM);
-		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+		intel_logical_ring_emit(ringbuf, params->instp_mask << 16 | params->instp_mode);
 		intel_logical_ring_advance(ringbuf);
 
-		dev_priv->relative_constants_mode = instp_mode;
+		dev_priv->relative_constants_mode = params->instp_mode;
 	}
 
 	exec_start = params->batch_obj_vm_offset +
-		     args->batch_start_offset;
+		     params->args_batch_start_offset;
 
 	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f99..07d402b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -81,6 +81,7 @@ struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas);
+int intel_execlists_submission_final(struct i915_execbuffer_params *params);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
-- 
1.9.1


