[Intel-gfx] [PATCH 067/190] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START

Chris Wilson chris at chris-wilson.co.uk
Mon Jan 11 01:17:18 PST 2016


Both the ->dispatch_execbuffer and ->emit_bb_start callbacks do exactly
the same thing: they add MI_BATCH_BUFFER_START to the request's
ringbuffer — so we need only one vfunc.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  6 +--
 drivers/gpu/drm/i915/i915_gem_render_state.c | 16 +++----
 drivers/gpu/drm/i915/intel_lrc.c             |  9 +++-
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 67 +++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h      | 12 +++--
 5 files changed, 55 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3956d74d8c8c..3e6384deca65 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1297,9 +1297,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     params->args_batch_start_offset;
 
-	ret = params->ring->dispatch_execbuffer(params->request,
-						exec_start, exec_len,
-						params->dispatch_flags);
+	ret = params->ring->emit_bb_start(params->request,
+					  exec_start, exec_len,
+					  params->dispatch_flags);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index bee3f0ccd0cd..ccc988c2b226 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -205,18 +205,18 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
-					       so.rodata->batch_items * 4,
-					       I915_DISPATCH_SECURE);
+	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
+					 so.rodata->batch_items * 4,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	if (so.aux_batch_size > 8) {
-		ret = req->engine->dispatch_execbuffer(req,
-						       (so.ggtt_offset +
-							so.aux_batch_offset),
-						       so.aux_batch_size,
-						       I915_DISPATCH_SECURE);
+		ret = req->engine->emit_bb_start(req,
+						 (so.ggtt_offset +
+						  so.aux_batch_offset),
+						 so.aux_batch_size,
+						 I915_DISPATCH_SECURE);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 82b21a883732..30effca91184 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -783,7 +783,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     args->batch_start_offset;
 
-	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+	ret = engine->emit_bb_start(params->request,
+				    exec_start, args->batch_len,
+				    params->dispatch_flags);
 	if (ret)
 		return ret;
 
@@ -1409,7 +1411,8 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 }
 
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
-			      u64 offset, unsigned dispatch_flags)
+			      u64 offset, u32 len,
+			      unsigned dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
@@ -1637,12 +1640,14 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 		return 0;
 
 	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
+					 so.rodata->batch_items * 4,
 					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	ret = req->engine->emit_bb_start(req,
 					 (so.ggtt_offset + so.aux_batch_offset),
+					 so.aux_batch_size,
 					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e584b0f631f8..04f0a77d49cf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1656,9 +1656,9 @@ gen8_ring_disable_irq(struct intel_engine_cs *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 length,
-			 unsigned dispatch_flags)
+i965_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 length,
+		   unsigned dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1683,9 +1683,9 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i830_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
@@ -1746,9 +1746,9 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i915_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2361,9 +2361,9 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen8_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
@@ -2387,9 +2387,9 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			     u64 offset, u32 len,
-			     unsigned dispatch_flags)
+hsw_emit_bb_start(struct drm_i915_gem_request *req,
+		  u64 offset, u32 len,
+		  unsigned dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2412,9 +2412,9 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen6_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2578,17 +2578,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	ring->write_tail = ring_write_tail;
 
 	if (IS_HASWELL(dev))
-		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+		ring->emit_bb_start = hsw_emit_bb_start;
 	else if (IS_GEN8(dev))
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		ring->emit_bb_start = gen8_emit_bb_start;
 	else if (INTEL_INFO(dev)->gen >= 6)
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		ring->emit_bb_start = gen6_emit_bb_start;
 	else if (INTEL_INFO(dev)->gen >= 4)
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+		ring->emit_bb_start = i965_emit_bb_start;
 	else if (IS_I830(dev) || IS_845G(dev))
-		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+		ring->emit_bb_start = i830_emit_bb_start;
 	else
-		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
+		ring->emit_bb_start = i915_emit_bb_start;
 	ring->init_hw = init_render_ring;
 	ring->cleanup = render_ring_cleanup;
 
@@ -2646,8 +2646,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 			ring->irq_enable = gen8_ring_enable_irq;
 			ring->irq_disable = gen8_ring_disable_irq;
-			ring->dispatch_execbuffer =
-				gen8_ring_dispatch_execbuffer;
+			ring->emit_bb_start = gen8_emit_bb_start;
 			if (i915.semaphores) {
 				ring->semaphore.sync_to = gen8_ring_sync;
 				ring->semaphore.signal = gen8_xcs_signal;
@@ -2657,8 +2656,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_enable = gen6_ring_enable_irq;
 			ring->irq_disable = gen6_ring_disable_irq;
-			ring->dispatch_execbuffer =
-				gen6_ring_dispatch_execbuffer;
+			ring->emit_bb_start = gen6_emit_bb_start;
 			if (i915.semaphores) {
 				ring->semaphore.sync_to = gen6_ring_sync;
 				ring->semaphore.signal = gen6_signal;
@@ -2687,7 +2685,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			ring->irq_enable = i9xx_ring_enable_irq;
 			ring->irq_disable = i9xx_ring_disable_irq;
 		}
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+		ring->emit_bb_start = i965_emit_bb_start;
 	}
 	ring->init_hw = init_ring_common;
 
@@ -2714,8 +2712,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 	ring->irq_enable = gen8_ring_enable_irq;
 	ring->irq_disable = gen8_ring_disable_irq;
-	ring->dispatch_execbuffer =
-			gen8_ring_dispatch_execbuffer;
+	ring->emit_bb_start = gen8_emit_bb_start;
 	if (i915.semaphores) {
 		ring->semaphore.sync_to = gen8_ring_sync;
 		ring->semaphore.signal = gen8_xcs_signal;
@@ -2744,7 +2741,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 		ring->irq_enable = gen8_ring_enable_irq;
 		ring->irq_disable = gen8_ring_disable_irq;
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		ring->emit_bb_start = gen8_emit_bb_start;
 		if (i915.semaphores) {
 			ring->semaphore.sync_to = gen8_ring_sync;
 			ring->semaphore.signal = gen8_xcs_signal;
@@ -2754,7 +2751,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 		ring->irq_enable = gen6_ring_enable_irq;
 		ring->irq_disable = gen6_ring_disable_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		ring->emit_bb_start = gen6_emit_bb_start;
 		if (i915.semaphores) {
 			ring->semaphore.signal = gen6_signal;
 			ring->semaphore.sync_to = gen6_ring_sync;
@@ -2801,7 +2798,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 		ring->irq_enable = gen8_ring_enable_irq;
 		ring->irq_disable = gen8_ring_disable_irq;
-		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		ring->emit_bb_start = gen8_emit_bb_start;
 		if (i915.semaphores) {
 			ring->semaphore.sync_to = gen8_ring_sync;
 			ring->semaphore.signal = gen8_xcs_signal;
@@ -2811,7 +2808,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 		ring->irq_enable = hsw_vebox_enable_irq;
 		ring->irq_disable = hsw_vebox_disable_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		ring->emit_bb_start = gen6_emit_bb_start;
 		if (i915.semaphores) {
 			ring->semaphore.sync_to = gen6_ring_sync;
 			ring->semaphore.signal = gen6_signal;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fdeadae726b8..3a10376b896f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -223,12 +223,6 @@ struct intel_engine_cs {
 	 * monotonic, even if not coherent.
 	 */
 	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
-	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
-					       u64 offset, u32 length,
-					       unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
 	void		(*cleanup)(struct intel_engine_cs *ring);
 
 	/* GEN8 signal/wait table - never trust comments!
@@ -301,7 +295,11 @@ struct intel_engine_cs {
 				      u32 invalidate_domains,
 				      u32 flush_domains);
 	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, unsigned dispatch_flags);
+					 u64 offset, u32 length,
+					 unsigned dispatch_flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.7.0.rc3



More information about the Intel-gfx mailing list