[Mesa-dev] [PATCH 35/51] i965: Convert some intel_batchbuffer prototypes over to brw_batch
Chris Wilson
chris at chris-wilson.co.uk
Tue Jan 10 21:23:58 UTC 2017
Just to ease the next intermediate patch.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
src/mesa/drivers/dri/i965/brw_batch.h | 1 +
src/mesa/drivers/dri/i965/brw_compute.c | 6 +--
src/mesa/drivers/dri/i965/brw_draw.c | 7 ++--
src/mesa/drivers/dri/i965/brw_state_batch.c | 6 +--
src/mesa/drivers/dri/i965/brw_urb.c | 4 +-
src/mesa/drivers/dri/i965/genX_blorp_exec.c | 9 +++--
src/mesa/drivers/dri/i965/intel_batchbuffer.c | 56 +++++++++++++--------------
src/mesa/drivers/dri/i965/intel_batchbuffer.h | 21 +++++-----
src/mesa/drivers/dri/i965/intel_blit.c | 7 ++--
9 files changed, 61 insertions(+), 56 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_batch.h b/src/mesa/drivers/dri/i965/brw_batch.h
index 89d8a88124..6fe80bcb90 100644
--- a/src/mesa/drivers/dri/i965/brw_batch.h
+++ b/src/mesa/drivers/dri/i965/brw_batch.h
@@ -62,6 +62,7 @@ typedef struct brw_batch {
enum brw_gpu_ring ring;
bool needs_sol_reset;
bool state_base_address_emitted;
+ int gen;
bool no_batch_wrap;
diff --git a/src/mesa/drivers/dri/i965/brw_compute.c b/src/mesa/drivers/dri/i965/brw_compute.c
index 1feb4a0401..4e76817661 100644
--- a/src/mesa/drivers/dri/i965/brw_compute.c
+++ b/src/mesa/drivers/dri/i965/brw_compute.c
@@ -200,9 +200,9 @@ brw_dispatch_compute_common(struct gl_context *ctx)
* we've got validated state that needs to be in the same batch as the
* primitives.
*/
- intel_batchbuffer_require_space(brw, estimated_buffer_space_needed,
+ intel_batchbuffer_require_space(&brw->batch, estimated_buffer_space_needed,
RENDER_RING);
- intel_batchbuffer_save_state(brw);
+ intel_batchbuffer_save_state(&brw->batch);
retry:
brw->batch.no_batch_wrap = true;
@@ -214,7 +214,7 @@ brw_dispatch_compute_common(struct gl_context *ctx)
if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (!fail_next) {
- intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_reset_to_saved(&brw->batch);
brw_batch_flush(&brw->batch, NULL);
fail_next = true;
goto retry;
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 928138ccaa..afbee5167b 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -507,8 +507,9 @@ brw_try_draw_prims(struct gl_context *ctx,
* we've got validated state that needs to be in the same batch as the
* primitives.
*/
- intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
- intel_batchbuffer_save_state(brw);
+ intel_batchbuffer_require_space(&brw->batch,
+ estimated_max_prim_size, RENDER_RING);
+ intel_batchbuffer_save_state(&brw->batch);
if (brw->num_instances != prims[i].num_instances ||
brw->basevertex != prims[i].basevertex ||
@@ -598,7 +599,7 @@ retry:
if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (!fail_next) {
- intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_reset_to_saved(&brw->batch);
brw_batch_flush(&brw->batch, NULL);
fail_next = true;
goto retry;
diff --git a/src/mesa/drivers/dri/i965/brw_state_batch.c b/src/mesa/drivers/dri/i965/brw_state_batch.c
index 40be068b76..4b5198f13e 100644
--- a/src/mesa/drivers/dri/i965/brw_state_batch.c
+++ b/src/mesa/drivers/dri/i965/brw_state_batch.c
@@ -85,7 +85,7 @@ brw_annotate_aub(struct brw_context *brw)
drm_intel_aub_annotation annotations[annotation_count];
int a = 0;
make_annotation(&annotations[a++], AUB_TRACE_TYPE_BATCH, 0,
- 4 * USED_BATCH(brw->batch));
+ 4 * USED_BATCH(&brw->batch));
for (int i = brw->state_batch_count; i-- > 0; ) {
uint32_t type = brw->state_batch_list[i].type;
uint32_t start_offset = brw->state_batch_list[i].offset;
@@ -134,8 +134,8 @@ __brw_state_batch(struct brw_context *brw,
* space, then flush and try again.
*/
if (batch->state_batch_offset < size ||
- offset < 4 * USED_BATCH(*batch) + batch->reserved_space) {
- brw_batch_flush(&brw->batch, NULL);
+ offset < 4 * USED_BATCH(batch) + batch->reserved_space) {
+ brw_batch_flush(batch, NULL);
offset = ROUND_DOWN_TO(batch->state_batch_offset - size, alignment);
}
diff --git a/src/mesa/drivers/dri/i965/brw_urb.c b/src/mesa/drivers/dri/i965/brw_urb.c
index a8c7c7d803..b381fb1c8f 100644
--- a/src/mesa/drivers/dri/i965/brw_urb.c
+++ b/src/mesa/drivers/dri/i965/brw_urb.c
@@ -249,8 +249,8 @@ void brw_upload_urb_fence(struct brw_context *brw)
uf.bits1.cs_fence = brw->urb.size;
/* erratum: URB_FENCE must not cross a 64byte cacheline */
- if ((USED_BATCH(brw->batch) & 15) > 12) {
- int pad = 16 - (USED_BATCH(brw->batch) & 15);
+ if ((USED_BATCH(&brw->batch) & 15) > 12) {
+ int pad = 16 - (USED_BATCH(&brw->batch) & 15);
do
*brw->batch.map_next++ = MI_NOOP;
while (--pad);
diff --git a/src/mesa/drivers/dri/i965/genX_blorp_exec.c b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
index e9a564135d..6b75a3b727 100644
--- a/src/mesa/drivers/dri/i965/genX_blorp_exec.c
+++ b/src/mesa/drivers/dri/i965/genX_blorp_exec.c
@@ -173,10 +173,11 @@ genX(blorp_exec)(struct blorp_batch *batch,
brw_select_pipeline(brw, BRW_RENDER_PIPELINE);
retry:
- intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
- intel_batchbuffer_save_state(brw);
+ intel_batchbuffer_require_space(&brw->batch,
+ estimated_max_batch_usage, RENDER_RING);
+ intel_batchbuffer_save_state(&brw->batch);
brw_bo *saved_bo = brw->batch.bo;
- uint32_t saved_used = USED_BATCH(brw->batch);
+ uint32_t saved_used = USED_BATCH(&brw->batch);
uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
#if GEN_GEN == 6
@@ -221,7 +222,7 @@ retry:
if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (!check_aperture_failed_once) {
check_aperture_failed_once = true;
- intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_reset_to_saved(&brw->batch);
brw_batch_flush(&brw->batch, NULL);
goto retry;
} else {
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index fb0ec73c15..e0ba259e5e 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -44,6 +44,7 @@ intel_batchbuffer_init(struct brw_batch *batch, dri_bufmgr *bufmgr,
int gen, bool has_llc)
{
batch->bufmgr = bufmgr;
+ batch->gen = gen;
intel_batchbuffer_reset(batch, has_llc);
@@ -99,21 +100,20 @@ intel_batchbuffer_reset(struct brw_batch *batch, bool has_llc)
}
void
-intel_batchbuffer_save_state(struct brw_context *brw)
+intel_batchbuffer_save_state(struct brw_batch *batch)
{
- brw->batch.saved.map_next = brw->batch.map_next;
- brw->batch.saved.reloc_count =
- drm_intel_gem_bo_get_reloc_count(brw->batch.bo);
+ batch->saved.map_next = batch->map_next;
+ batch->saved.reloc_count = drm_intel_gem_bo_get_reloc_count(batch->bo);
}
void
-intel_batchbuffer_reset_to_saved(struct brw_context *brw)
+intel_batchbuffer_reset_to_saved(struct brw_batch *batch)
{
- drm_intel_gem_bo_clear_relocs(brw->batch.bo, brw->batch.saved.reloc_count);
+ drm_intel_gem_bo_clear_relocs(batch->bo, batch->saved.reloc_count);
- brw->batch.map_next = brw->batch.saved.map_next;
- if (USED_BATCH(brw->batch) == 0)
- brw->batch.ring = UNKNOWN_RING;
+ batch->map_next = batch->saved.map_next;
+ if (USED_BATCH(batch) == 0)
+ batch->ring = UNKNOWN_RING;
}
void
@@ -130,29 +130,29 @@ intel_batchbuffer_free(struct brw_batch *batch)
}
void
-intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+intel_batchbuffer_require_space(struct brw_batch *batch, GLuint sz,
enum brw_gpu_ring ring)
{
/* If we're switching rings, implicitly flush the batch. */
- if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
- brw->gen >= 6) {
- brw_batch_flush(&brw->batch, NULL);
+ if (unlikely(ring != batch->ring) && batch->ring != UNKNOWN_RING &&
+ batch->gen >= 6) {
+ brw_batch_flush(batch, NULL);
}
#ifdef DEBUG
assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
- if (intel_batchbuffer_space(&brw->batch) < sz)
- brw_batch_flush(&brw->batch, NULL);
+ if (intel_batchbuffer_space(batch) < sz)
+ brw_batch_flush(batch, NULL);
- enum brw_gpu_ring prev_ring = brw->batch.ring;
+ enum brw_gpu_ring prev_ring = batch->ring;
/* The intel_batchbuffer_flush() calls above might have changed
* brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
*/
- brw->batch.ring = ring;
+ batch->ring = ring;
if (unlikely(prev_ring == UNKNOWN_RING && ring == RENDER_RING))
- intel_batchbuffer_emit_render_ring_prelude(brw);
+ intel_batchbuffer_emit_render_ring_prelude(batch);
}
static void
@@ -171,7 +171,7 @@ do_batch_dump(struct brw_context *brw)
drm_intel_decode_set_batch_pointer(decode,
batch->bo->virtual,
batch->bo->offset64,
- USED_BATCH(*batch));
+ USED_BATCH(batch));
} else {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
@@ -180,7 +180,7 @@ do_batch_dump(struct brw_context *brw)
drm_intel_decode_set_batch_pointer(decode,
batch->map,
batch->bo->offset64,
- USED_BATCH(*batch));
+ USED_BATCH(batch));
}
drm_intel_decode_set_output_file(decode, stderr);
@@ -196,7 +196,7 @@ do_batch_dump(struct brw_context *brw)
}
void
-intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw)
+intel_batchbuffer_emit_render_ring_prelude(struct brw_batch *batch)
{
/* Un-used currently */
}
@@ -268,7 +268,7 @@ do_flush_locked(struct brw_context *brw)
if (brw->has_llc) {
drm_intel_bo_unmap(batch->bo);
} else {
- ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
+ ret = drm_intel_bo_subdata(batch->bo, 0, 4 * USED_BATCH(batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
ret = drm_intel_bo_subdata(batch->bo,
batch->state_batch_offset,
@@ -294,11 +294,11 @@ do_flush_locked(struct brw_context *brw)
brw_annotate_aub(brw);
if (batch->hw_ctx == NULL || batch->ring != RENDER_RING) {
- ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
+ ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(batch),
NULL, 0, 0, flags);
} else {
ret = drm_intel_gem_bo_context_exec(batch->bo, batch->hw_ctx,
- 4 * USED_BATCH(*batch), flags);
+ 4 * USED_BATCH(batch), flags);
}
}
@@ -325,14 +325,14 @@ brw_batch_flush(struct brw_batch *batch, struct perf_debug *info)
struct brw_context *brw = container_of(batch, brw, batch);
int ret;
- if (USED_BATCH(brw->batch) == 0)
+ if (USED_BATCH(batch) == 0)
return 0;
if (brw->batch.throttle_batch[0] == NULL)
brw->batch.throttle_batch[0] = brw_bo_get(brw->batch.bo);
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
- int bytes_for_commands = 4 * USED_BATCH(brw->batch);
+ int bytes_for_commands = 4 * USED_BATCH(batch);
int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
int total_bytes = bytes_for_commands + bytes_for_state;
fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
@@ -352,7 +352,7 @@ brw_batch_flush(struct brw_batch *batch, struct perf_debug *info)
/* Mark the end of the buffer. */
intel_batchbuffer_emit_dword(&brw->batch, MI_BATCH_BUFFER_END);
- if (USED_BATCH(brw->batch) & 1) {
+ if (USED_BATCH(batch) & 1) {
/* Round batchbuffer usage to 2 DWORDs. */
intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
}
@@ -427,7 +427,7 @@ intel_batchbuffer_data(struct brw_context *brw,
const void *data, GLuint bytes, enum brw_gpu_ring ring)
{
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(brw, bytes, ring);
+ intel_batchbuffer_require_space(&brw->batch, bytes, ring);
memcpy(brw->batch.map_next, data, bytes);
brw->batch.map_next += bytes >> 2;
}
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.h b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
index a984735bb1..508b32900f 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.h
@@ -18,13 +18,14 @@ struct brw_batch;
struct brw_context;
enum brw_gpu_ring;
-void intel_batchbuffer_emit_render_ring_prelude(struct brw_context *brw);
+void intel_batchbuffer_emit_render_ring_prelude(struct brw_batch *batch);
int intel_batchbuffer_init(struct brw_batch *batch, dri_bufmgr *bufmgr,
int gen, bool has_llc);
-void intel_batchbuffer_free(struct brw_batch *batch);
-void intel_batchbuffer_save_state(struct brw_context *brw);
-void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
-void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
+void intel_batchbuffer_free(struct brw_batch *batch);
+
+void intel_batchbuffer_save_state(struct brw_batch *batch);
+void intel_batchbuffer_reset_to_saved(struct brw_batch *batch);
+void intel_batchbuffer_require_space(struct brw_batch *batch, GLuint sz,
enum brw_gpu_ring ring);
void brw_batch_start_hook(struct brw_batch *batch);
@@ -51,7 +52,7 @@ uint64_t intel_batchbuffer_reloc64(struct brw_batch *batch,
uint32_t write_domain,
uint32_t delta);
-#define USED_BATCH(batch) ((uintptr_t)((batch).map_next - (batch).map))
+#define USED_BATCH(batch) ((uintptr_t)((batch)->map_next - (batch)->map))
static inline uint32_t float_as_int(float f)
{
@@ -73,7 +74,7 @@ static inline unsigned
intel_batchbuffer_space(struct brw_batch *batch)
{
return (batch->state_batch_offset - batch->reserved_space)
- - USED_BATCH(*batch) * 4;
+ - USED_BATCH(batch) * 4;
}
@@ -96,10 +97,10 @@ intel_batchbuffer_emit_float(struct brw_batch *batch, float f)
static inline void
intel_batchbuffer_begin(struct brw_context *brw, int n, enum brw_gpu_ring ring)
{
- intel_batchbuffer_require_space(brw, n * 4, ring);
+ intel_batchbuffer_require_space(&brw->batch, n * 4, ring);
#ifdef DEBUG
- brw->batch.emit = USED_BATCH(brw->batch);
+ brw->batch.emit = USED_BATCH(&brw->batch);
brw->batch.total = n;
#endif
}
@@ -109,7 +110,7 @@ intel_batchbuffer_advance(struct brw_context *brw)
{
#ifdef DEBUG
brw_batch *batch = &brw->batch;
- unsigned int _n = USED_BATCH(*batch) - batch->emit;
+ unsigned int _n = USED_BATCH(batch) - batch->emit;
assert(batch->total != 0);
if (_n != batch->total) {
fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
diff --git a/src/mesa/drivers/dri/i965/intel_blit.c b/src/mesa/drivers/dri/i965/intel_blit.c
index 748d1f90d1..8c832ac249 100644
--- a/src/mesa/drivers/dri/i965/intel_blit.c
+++ b/src/mesa/drivers/dri/i965/intel_blit.c
@@ -622,7 +622,7 @@ intelEmitCopyBlit(struct brw_context *brw,
unsigned length = brw->gen >= 8 ? 10 : 8;
- intel_batchbuffer_require_space(brw, length * 4, BLT_RING);
+ intel_batchbuffer_require_space(&brw->batch, length * 4, BLT_RING);
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__func__,
src_buffer, src_pitch, src_offset, src_x, src_y,
@@ -799,8 +799,9 @@ intelEmitImmediateColorExpandBlit(struct brw_context *brw,
dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
unsigned xy_setup_blt_length = brw->gen >= 8 ? 10 : 8;
- intel_batchbuffer_require_space(brw, (xy_setup_blt_length * 4) +
- (3 * 4) + dwords * 4, BLT_RING);
+ intel_batchbuffer_require_space(&brw->batch,
+ (xy_setup_blt_length * 4) +
+ (3 * 4) + dwords * 4, BLT_RING);
opcode = XY_SETUP_BLT_CMD;
if (cpp == 4)
--
2.11.0
More information about the mesa-dev
mailing list