[Mesa-dev] [PATCH 16/17] i965: Delete BATCH_RESERVED handling.
Kenneth Graunke
kenneth at whitecape.org
Wed Sep 6 00:09:49 UTC 2017
Now that we can grow the batchbuffer if we absolutely need the extra
space, we don't need to reserve space for the final do-or-die ending
commands.
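
As a rough illustration of the idea (not the driver code itself; the
struct and helper names below are invented for the example), the
require-space path now either flushes or grows the buffer in place, so
no tail bytes have to be set aside for the ending commands:

#include <assert.h>
#include <stdlib.h>

/* Toy batch buffer: grows instead of reserving a fixed tail. */
struct demo_batch {
   char  *map;      /* CPU mapping of the buffer */
   size_t size;     /* current buffer size in bytes */
   size_t used;     /* bytes already written */
   int    no_wrap;  /* set while emitting a sequence that must not be split */
};

static void demo_flush(struct demo_batch *b)
{
   /* stand-in for submitting the batch and starting a fresh one */
   b->used = 0;
}

static void demo_require_space(struct demo_batch *b, size_t sz, size_t cap)
{
   if (b->used + sz < b->size)
      return;

   if (!b->no_wrap) {
      demo_flush(b);
   } else {
      /* grow by 50%, capped, like MIN2(size + size / 2, MAX_BATCH_SIZE) */
      size_t new_size = b->size + b->size / 2;
      if (new_size > cap)
         new_size = cap;
      char *new_map = realloc(b->map, new_size);
      assert(new_map != NULL);
      b->map = new_map;
      b->size = new_size;
      assert(b->used + sz < b->size);
   }
}

The real code does the equivalent with grow_buffer() on the batch BO,
as the hunks below show.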
---
src/mesa/drivers/dri/i965/intel_batchbuffer.c | 11 +++--------
src/mesa/drivers/dri/i965/intel_batchbuffer.h | 26 --------------------------
2 files changed, 3 insertions(+), 34 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 118f75c4d71..0af9101e5f4 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -168,7 +168,6 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch,
add_exec_bo(batch, batch->bo);
assert(batch->bo->index == 0);
- batch->reserved_space = BATCH_RESERVED;
batch->needs_sol_reset = false;
batch->state_base_address_emitted = false;
@@ -318,8 +317,7 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
/* For now, flush as if the batch and state buffers still shared a BO */
const unsigned batch_used = USED_BATCH(*batch) * 4;
- if (batch_used + sz >=
- BATCH_SZ - batch->reserved_space - batch->state_used) {
+ if (batch_used + sz >= BATCH_SZ - batch->state_used) {
if (!brw->no_batch_wrap) {
intel_batchbuffer_flush(brw);
} else {
@@ -327,8 +325,7 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
MIN2(batch->bo->size + batch->bo->size / 2, MAX_BATCH_SIZE);
grow_buffer(brw, &batch->bo, &batch->map, batch_used, new_size);
batch->map_next = (void *) batch->map + batch_used;
- assert(batch_used + sz <
- batch->bo->size - batch->reserved_space - batch->state_used);
+ assert(batch_used + sz < batch->bo->size - batch->state_used);
}
}
@@ -831,8 +828,6 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
bytes_for_state, 100.0f * bytes_for_state / STATE_SZ);
}
- brw->batch.reserved_space = 0;
-
brw_finish_batch(brw);
/* Mark the end of the buffer. */
@@ -967,7 +962,7 @@ brw_state_batch(struct brw_context *brw,
uint32_t offset = ALIGN(batch->state_used, alignment);
/* For now, follow the old flushing behavior. */
- int batch_space = batch->reserved_space + USED_BATCH(*batch) * 4;
+ int batch_space = USED_BATCH(*batch) * 4;
if (offset + size >= STATE_SZ - batch_space) {
if (!brw->no_batch_wrap) {
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index 8a2e3cfc9bb..c02cafed521 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -10,32 +10,6 @@
extern "C" {
#endif
-/**
- * Number of bytes to reserve for commands necessary to complete a batch.
- *
- * This includes:
- * - MI_BATCHBUFFER_END (4 bytes)
- * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
- * - Any state emitted by vtbl->finish_batch():
- * - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
- * - Disabling OA counters on Gen6+ (3 DWords = 12 bytes)
- * - Ending MI_REPORT_PERF_COUNT on Gen5+, plus associated PIPE_CONTROLs:
- * - Two sets of PIPE_CONTROLs, which become 4 PIPE_CONTROLs each on SNB,
- * which are 5 DWords each ==> 2 * 4 * 5 * 4 = 160 bytes
- * - 3 DWords for MI_REPORT_PERF_COUNT itself on Gen6+. ==> 12 bytes.
- * On Ironlake, it's 6 DWords, but we have some slack due to the lack of
- * Sandybridge PIPE_CONTROL madness.
- * - CC_STATE workaround on HSW (17 * 4 = 68 bytes)
- * - 10 dwords for initial mi_flush
- * - 2 dwords for CC state setup
- * - 5 dwords for the required pipe control at the end
- * - Restoring L3 configuration: (24 dwords = 96 bytes)
- * - 2*6 dwords for two PIPE_CONTROL flushes.
- * - 7 dwords for L3 configuration set-up.
- * - 5 dwords for L3 atomic set-up (on HSW).
- */
-#define BATCH_RESERVED 308
-
struct intel_batchbuffer;
void intel_batchbuffer_init(struct intel_screen *screen,
--
2.14.1