[Mesa-dev] [PATCH 43/70] i965: Move all the render preamble together
Chris Wilson
chris at chris-wilson.co.uk
Fri Aug 7 13:13:47 PDT 2015
Rather than split the render batch setup between two hooks, coalesce it
into a single callback. To simplify this, move some of the state
dirtying from the start to the finish hook.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
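For reviewers skimming the diff, here is a condensed sketch of where the two
hooks end up after this patch. The types and field names below are simplified
stand-ins for the real brw_batch/brw_context structures, not the actual i965
headers:

enum brw_ring { UNKNOWN_RING, RENDER_RING, BLT_RING };

struct brw_batch {
   enum brw_ring ring;
   void *hw_ctx;              /* non-NULL when the kernel provides HW contexts */
};

struct brw_context {
   struct brw_batch batch;
   unsigned new_driver_state; /* stand-in for ctx.NewDriverState */
   int state_batch_count;
   int ib_type;               /* stand-in for brw->ib.type */
};

#define BRW_NEW_CONTEXT (1u << 0)
#define BRW_NEW_BATCH   (1u << 1)

/* Start hook: only the render-ring preamble remains here
 * (shader_time collection, perf monitor dump, OA snapshot). */
static void start_hook(struct brw_context *brw)
{
   if (brw->batch.ring != RENDER_RING)
      return;
   /* ... collect shader_time, dump perf monitors, snapshot OA counters ... */
}

/* Finish hook: the per-batch state dirtying moves here, so it runs once
 * when the batch is closed instead of being split across the start hook. */
static void finish_hook(struct brw_context *brw)
{
   brw->state_batch_count = 0;
   brw->ib_type = -1;

   /* Without a hardware context, all state must be re-emitted next batch. */
   if (!brw->batch.hw_ctx)
      brw->new_driver_state |= BRW_NEW_CONTEXT;
   brw->new_driver_state |= BRW_NEW_BATCH;
}
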
src/mesa/drivers/dri/i965/brw_batch.h | 6 ++---
src/mesa/drivers/dri/i965/brw_context.c | 36 ++++++++++++++++-----------
src/mesa/drivers/dri/i965/intel_batchbuffer.c | 16 +-----------
3 files changed, 26 insertions(+), 32 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_batch.h b/src/mesa/drivers/dri/i965/brw_batch.h
index bef544d..bbfb736 100644
--- a/src/mesa/drivers/dri/i965/brw_batch.h
+++ b/src/mesa/drivers/dri/i965/brw_batch.h
@@ -229,7 +229,7 @@ inline static void brw_batch_maybe_flush(struct brw_batch *batch)
void intel_batchbuffer_save_state(struct brw_batch *batch);
void intel_batchbuffer_reset_to_saved(struct brw_batch *batch);
-void intel_batchbuffer_emit_render_ring_prelude(struct brw_batch *batch);
+void brw_batch_start_hook(struct brw_batch *batch);
#define USED_BATCH(batch) ((uintptr_t)((batch)->map_next - (batch)->map))
@@ -262,8 +262,8 @@ intel_batchbuffer_require_space(struct brw_batch *batch, GLuint sz,
*/
batch->ring = ring;
- if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
- intel_batchbuffer_emit_render_ring_prelude(batch);
+ if (unlikely(prev_ring == UNKNOWN_RING))
+ brw_batch_start_hook(batch);
}
int brw_batch_begin(struct brw_batch *batch,
diff --git a/src/mesa/drivers/dri/i965/brw_context.c b/src/mesa/drivers/dri/i965/brw_context.c
index 2126d68..95e545c 100644
--- a/src/mesa/drivers/dri/i965/brw_context.c
+++ b/src/mesa/drivers/dri/i965/brw_context.c
@@ -1502,20 +1502,8 @@ void brw_batch_start_hook(brw_batch *batch)
{
struct brw_context *brw = container_of(batch, brw, batch);
- /* If the kernel supports hardware contexts, then most hardware state is
- * preserved between batches; we only need to re-emit state that is required
- * to be in every batch. Otherwise we need to re-emit all the state that
- * would otherwise be stored in the context (which for all intents and
- * purposes means everything).
- */
- if (!batch->hw_ctx)
- brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;
-
- brw->ctx.NewDriverState |= BRW_NEW_BATCH;
-
- brw->state_batch_count = 0;
-
- brw->ib.type = -1;
+ if (batch->ring != RENDER_RING)
+ return;
/* We need to periodically reap the shader time results, because rollover
* happens every few seconds. We also want to see results every once in a
@@ -1527,6 +1515,8 @@ void brw_batch_start_hook(brw_batch *batch)
if (INTEL_DEBUG & DEBUG_PERFMON)
brw_dump_perf_monitors(brw);
+
+ brw_perf_monitor_new_batch(brw);
}
/**
@@ -1579,6 +1569,24 @@ void brw_batch_finish_hook(brw_batch *batch)
brw_perf_monitor_finish_batch(brw);
brw->cache.bo_used_by_gpu = true;
+
+ brw->state_batch_count = 0;
+
+ brw->ib.type = -1;
+
+ if (brw->use_resource_streamer)
+ gen7_reset_hw_bt_pool_offsets(brw);
+
+ /* If the kernel supports hardware contexts, then most hardware state is
+ * preserved between batches; we only need to re-emit state that is required
+ * to be in every batch. Otherwise we need to re-emit all the state that
+ * would otherwise be stored in the context (which for all intents and
+ * purposes means everything).
+ */
+ if (!batch->hw_ctx)
+ brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;
+
+ brw->ctx.NewDriverState |= BRW_NEW_BATCH;
}
void brw_batch_report_flush_hook(struct brw_batch *batch,
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 49c1c27..0d5821f 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -168,15 +168,6 @@ do_batch_dump(struct brw_context *brw)
}
}
-void
-intel_batchbuffer_emit_render_ring_prelude(struct brw_batch *batch)
-{
- struct brw_context *brw = container_of(batch, brw, batch);
-
- /* We may need to enable and snapshot OA counters. */
- brw_perf_monitor_new_batch(brw);
-}
-
/**
* Called when starting a new batch buffer.
*/
@@ -186,8 +177,6 @@ brw_new_batch(struct brw_context *brw)
/* Create a new batchbuffer and reset the associated state: */
drm_intel_gem_bo_clear_relocs(brw->batch.bo, 0);
intel_batchbuffer_reset(brw);
-
- brw_batch_start_hook(&brw->batch);
}
static void
@@ -344,9 +333,6 @@ brw_batch_flush(struct brw_batch *batch, struct perf_debug *info)
drm_intel_bo_wait_rendering(brw->batch.bo);
}
- if (brw->use_resource_streamer)
- gen7_reset_hw_bt_pool_offsets(brw);
-
/* Start a new batch buffer. */
brw_new_batch(brw);
@@ -443,7 +429,7 @@ int brw_batch_end(struct brw_batch *batch)
batch->ring = ring;
if (ring == RENDER_RING)
- intel_batchbuffer_emit_render_ring_prelude(batch);
+ brw_batch_start_hook(batch);
batch->repeat = true;
longjmp(batch->jmpbuf, 1);
--
2.5.0