[Intel-gfx] [PATCH 32/64] drm/i915: Remove obsolete engine->gpu_caches_dirty
Chris Wilson
chris at chris-wilson.co.uk
Thu Jul 7 08:41:38 UTC 2016
Space for flushing the GPU cache prior to completing the request is
preallocated as part of the request, and so emitting that flush cannot fail.
Since every caller can now invoke engine->emit_flush() directly with explicit
invalidate/flush domains, the gpu_caches_dirty flag and the
*_flush_all_caches() / *_invalidate_all_caches() helpers built around it are
obsolete and can be removed.
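
As a rough standalone illustration of the calling convention every caller
follows after this patch (a sketch only: the struct layouts and the
demo_emit_flush() body are simplified stand-ins, not the real i915
definitions): the flush is emitted directly through the engine's emit_flush()
vfunc with explicit invalidate/flush domain masks, rather than setting
gpu_caches_dirty and relying on a later *_flush_all_caches() helper.

/*
 * Minimal sketch of the emit_flush() calling convention.  Types and the
 * emit_flush body are illustrative stand-ins; only the domain-mask usage
 * mirrors the driver.
 */
#include <stdio.h>

#define I915_GEM_GPU_DOMAINS 0x3eU	/* union of the GPU cache domains */

struct demo_request;

struct demo_engine {
	/* One vfunc: invalidate before a batch, flush writes after it. */
	int (*emit_flush)(struct demo_request *req,
			  unsigned int invalidate_domains,
			  unsigned int flush_domains);
};

struct demo_request {
	struct demo_engine *engine;
};

static int demo_emit_flush(struct demo_request *req,
			   unsigned int invalidate_domains,
			   unsigned int flush_domains)
{
	/* Ring space for these commands is reserved when the request is
	 * allocated, which is why emitting them here cannot fail. */
	(void)req;
	printf("emit_flush: invalidate=%#x flush=%#x\n",
	       invalidate_domains, flush_domains);
	return 0;
}

int main(void)
{
	struct demo_engine engine = { .emit_flush = demo_emit_flush };
	struct demo_request req = { .engine = &engine };

	/* Before running a batch: invalidate GPU caches and TLBs. */
	req.engine->emit_flush(&req, I915_GEM_GPU_DOMAINS, 0);

	/* When closing the request: flush any writes the batch made. */
	req.engine->emit_flush(&req, 0, I915_GEM_GPU_DOMAINS);
	return 0;
}
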
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9 +---
drivers/gpu/drm/i915/i915_gem_gtt.c | 18 ++++----
drivers/gpu/drm/i915/i915_gem_request.c | 7 ++-
drivers/gpu/drm/i915/intel_lrc.c | 47 +++----------------
drivers/gpu/drm/i915/intel_lrc.h | 2 -
drivers/gpu/drm/i915/intel_ringbuffer.c | 72 +++++++-----------------------
drivers/gpu/drm/i915/intel_ringbuffer.h | 7 ---
8 files changed, 39 insertions(+), 125 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d0ce01d3684c..32051a68246f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -567,7 +567,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* itlb_before_ctx_switch.
*/
if (IS_GEN6(dev_priv)) {
- ret = req->engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+ ret = req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e05c205f2e19..3b5d03a063eb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -972,10 +972,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
- /* Unconditionally invalidate gpu caches and ensure that we do flush
- * any residual writes from the previous batch.
- */
- return intel_engine_invalidate_all_caches(req);
+ /* Unconditionally invalidate gpu caches and TLBs. */
+ return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
}
static bool
@@ -1156,9 +1154,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
- /* Unconditionally force add_request to emit a full flush. */
- params->engine->gpu_caches_dirty = true;
-
/* Add a breadcrumb for the completion of the batch buffer */
__i915_add_request(params->request, params->batch_obj, true);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5357802171b..a3d9c723dd02 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1664,9 +1664,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = req->engine->flush(req,
- I915_GEM_GPU_DOMAINS,
- I915_GEM_GPU_DOMAINS);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1692,9 +1692,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = req->engine->flush(req,
- I915_GEM_GPU_DOMAINS,
- I915_GEM_GPU_DOMAINS);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1712,9 +1712,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (req->engine->id != RCS) {
- ret = req->engine->flush(req,
- I915_GEM_GPU_DOMAINS,
- I915_GEM_GPU_DOMAINS);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 4dc51e199269..f9fca8c71f24 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -452,10 +452,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
* what.
*/
if (flush_caches) {
- if (i915.enable_execlists)
- ret = logical_ring_flush_all_caches(request);
- else
- ret = intel_engine_flush_all_caches(request);
+ ret = request->engine->emit_flush(request,
+ 0, I915_GEM_GPU_DOMAINS);
+
/* Not allowed to fail! */
WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d5fd5fff7eca..025d819614be 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -645,24 +645,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
spin_unlock_bh(&engine->execlist_lock);
}
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- uint32_t flush_domains;
- int ret;
-
- flush_domains = 0;
- if (engine->gpu_caches_dirty)
- flush_domains = I915_GEM_GPU_DOMAINS;
-
- ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- if (ret)
- return ret;
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
@@ -693,7 +675,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(req);
+ return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -933,22 +915,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- if (!engine->gpu_caches_dirty)
- return 0;
-
- ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
@@ -1029,15 +995,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *engine = req->engine;
struct intel_ring *ring = req->ring;
struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
return 0;
- engine->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1054,8 +1020,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_ring_advance(ring);
- engine->gpu_caches_dirty = true;
- ret = logical_ring_flush_all_caches(req);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 63d6a49a0ded..5dae5bc5046e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -69,8 +69,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-
/* Logical Ring Contexts */
/* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8d931552e9e0..5cf9d3e3d12a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -693,8 +693,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
if (w->count == 0)
return 0;
- req->engine->gpu_caches_dirty = true;
- ret = intel_engine_flush_all_caches(req);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -711,8 +712,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_ring_advance(ring);
- req->engine->gpu_caches_dirty = true;
- ret = intel_engine_flush_all_caches(req);
+ ret = req->engine->emit_flush(req,
+ I915_GEM_GPU_DOMAINS,
+ I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -2864,21 +2866,21 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
engine->add_request = gen8_render_add_request;
- engine->flush = gen8_render_ring_flush;
+ engine->emit_flush = gen8_render_ring_flush;
if (i915.semaphores)
engine->semaphore.signal = gen8_rcs_signal;
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
- engine->flush = gen7_render_ring_flush;
+ engine->emit_flush = gen7_render_ring_flush;
if (IS_GEN6(dev_priv))
- engine->flush = gen6_render_ring_flush;
+ engine->emit_flush = gen6_render_ring_flush;
} else if (IS_GEN5(dev_priv)) {
- engine->flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_render_ring_flush;
} else {
if (INTEL_GEN(dev_priv) < 4)
- engine->flush = gen2_render_ring_flush;
+ engine->emit_flush = gen2_render_ring_flush;
else
- engine->flush = gen4_render_ring_flush;
+ engine->emit_flush = gen4_render_ring_flush;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
@@ -2922,7 +2924,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev_priv))
engine->write_tail = gen6_bsd_ring_write_tail;
- engine->flush = gen6_bsd_ring_flush;
+ engine->emit_flush = gen6_bsd_ring_flush;
if (INTEL_GEN(dev_priv) >= 8)
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
@@ -2930,7 +2932,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
} else {
engine->mmio_base = BSD_RING_BASE;
- engine->flush = bsd_ring_flush;
+ engine->emit_flush = bsd_ring_flush;
if (IS_GEN5(dev_priv))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
@@ -2956,7 +2958,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
intel_ring_default_vfuncs(dev_priv, engine);
- engine->flush = gen6_bsd_ring_flush;
+ engine->emit_flush = gen6_bsd_ring_flush;
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
@@ -2976,7 +2978,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
intel_ring_default_vfuncs(dev_priv, engine);
- engine->flush = gen6_ring_flush;
+ engine->emit_flush = gen6_ring_flush;
if (INTEL_GEN(dev_priv) >= 8)
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
@@ -2999,7 +3001,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
intel_ring_default_vfuncs(dev_priv, engine);
- engine->flush = gen6_ring_flush;
+ engine->emit_flush = gen6_ring_flush;
if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
@@ -3013,46 +3015,6 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
return intel_init_engine(dev, engine);
}
-int
-intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- if (!engine->gpu_caches_dirty)
- return 0;
-
- ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
- if (ret)
- return ret;
-
- trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
-int
-intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- uint32_t flush_domains;
- int ret;
-
- flush_domains = 0;
- if (engine->gpu_caches_dirty)
- flush_domains = I915_GEM_GPU_DOMAINS;
-
- ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- if (ret)
- return ret;
-
- trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
- engine->gpu_caches_dirty = false;
- return 0;
-}
-
void intel_engine_stop(struct intel_engine_cs *engine)
{
int ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index dbc17c4ac98b..79faad9462d2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -205,9 +205,6 @@ struct intel_engine_cs {
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
- int __must_check (*flush)(struct drm_i915_gem_request *req,
- u32 invalidate_domains,
- u32 flush_domains);
int (*add_request)(struct drm_i915_gem_request *req);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
@@ -324,8 +321,6 @@ struct intel_engine_cs {
*/
u32 last_submitted_seqno;
- bool gpu_caches_dirty;
-
struct i915_gem_context *last_context;
struct intel_engine_hangcheck hangcheck;
@@ -473,8 +468,6 @@ void intel_ring_update_space(struct intel_ring *ring);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);
--
2.8.1