[Mesa-dev] [PATCH 1/1] i965: Make sure the shadow buffers have enough space
James Xiong
james.xiong at intel.com
Mon Apr 9 23:06:16 UTC 2018
From: "Xiong, James" <james.xiong at intel.com>
On non-LLC platforms, we malloc shadow batch/state buffers with the
same sizes we request for the batch/state buffers' GEM allocations.
However, the buffer allocator reuses similar-sized GEM objects, so in
some cases it returns a buffer larger than we asked for, and we end up
with shadow buffers that are smaller than the GEM allocations. If we
then use the full size of an over-allocated batch/state buffer, we can
access beyond the bounds of its shadow buffer and cause a segmentation
fault and/or memory corruption.
A few examples:
   case            batch                     state
                   request   bo    shadow    request   bo    shadow
   init         0    20K     20K    20K        16K     16K    16K
   grow_buffer  1    30K     32K    30K        24K     24K    24K
   grow_buffer  2    48K     48K    48K        36K     40K    36K
   grow_buffer  3    72K     80K    72K        60K     64K    60K
   grow_buffer  4   120K    128K   120K          -       -      -
Batch cases 1, 3, and 4 and state cases 2 and 3 above are problematic:
the shadow buffer ends up smaller than the BO. We can fix this by
changing the order: allocate the BO first, then allocate the shadow
buffer using the BO's actual size, so that the shadow buffer is always
at least as large as the GEM allocation (see the sketch below).
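The reordered allocation, as a minimal self-contained C sketch; here
alloc_bo, struct bo, and the power-of-two bucket rounding are
illustrative stand-ins for the real GEM buffer manager, not the
driver's API:

  #include <stdio.h>
  #include <stdlib.h>

  struct bo { size_t size; };

  /* Illustrative stand-in for a bucketing allocator: it may hand back
   * a buffer larger than requested (e.g. 30K served from a 32K
   * bucket). The real bucket layout differs; only the rounding-up
   * behavior matters here. */
  static struct bo *alloc_bo(size_t request)
  {
     struct bo *bo = malloc(sizeof(*bo));
     size_t bucket = 4096;
     while (bucket < request)
        bucket *= 2;
     bo->size = bucket;
     return bo;
  }

  int main(void)
  {
     /* Allocate the BO first, then size the shadow from bo->size, so
      * the shadow can never be smaller than the GEM allocation. */
     struct bo *bo = alloc_bo(30 * 1024);
     void *shadow = malloc(bo->size);   /* not malloc(30 * 1024) */

     printf("requested 30K, BO (and shadow) size %zuK\n",
            bo->size / 1024);

     free(shadow);
     free(bo);
     return 0;
  }

With the old order, the shadow would have been malloc'd from the 30K
request while the BO came back as 32K, leaving 2K of the BO with no
shadow backing.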
Another problem: even though the batch/state buffers can grow, the
out-of-space checks always compare against the initial BATCH_SZ and
STATE_SZ. To utilize the entire buffers, compare against their actual
sizes instead, as sketched below.
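A minimal sketch of the changed check; the struct and helper names are
illustrative, not the driver's:

  #include <stdio.h>

  #define STATE_SZ (16 * 1024)   /* initial state buffer size */

  struct growing_buf { size_t used, bo_size; };

  /* Old check: compares against the fixed initial size, so a buffer
   * that has grown (or was over-allocated) flushes too early. */
  static int needs_flush_old(const struct growing_buf *b, size_t sz)
  {
     return b->used + sz >= STATE_SZ;
  }

  /* New check: compares against the actual allocation size. */
  static int needs_flush_new(const struct growing_buf *b, size_t sz)
  {
     return b->used + sz >= b->bo_size;
  }

  int main(void)
  {
     /* A state buffer that grew from 16K to 24K, with 20K in use. */
     struct growing_buf state = { .used = 20 * 1024,
                                  .bo_size = 24 * 1024 };

     printf("old check flushes: %d, new check flushes: %d\n",
            needs_flush_old(&state, 1024),
            needs_flush_new(&state, 1024));
     return 0;
  }

The old check would flush here even though the grown buffer still has
4K free; the new check keeps filling until the actual size is reached.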
Cc: mesa-stable at lists.freedesktop.org
Signed-off-by: Xiong, James <james.xiong at intel.com>
---
src/mesa/drivers/dri/i965/brw_context.h | 1 +
src/mesa/drivers/dri/i965/intel_batchbuffer.c | 49 +++++++++++++++++----------
2 files changed, 32 insertions(+), 18 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h
index f049d08..39aae08 100644
--- a/src/mesa/drivers/dri/i965/brw_context.h
+++ b/src/mesa/drivers/dri/i965/brw_context.h
@@ -477,6 +477,7 @@ struct brw_growing_bo {
struct brw_bo *partial_bo;
uint32_t *partial_bo_map;
unsigned partial_bytes;
+ unsigned shadow_size;
};
struct intel_batchbuffer {
diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 7286140..facbbf8 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -107,12 +107,6 @@ intel_batchbuffer_init(struct brw_context *brw)
batch->use_shadow_copy = !devinfo->has_llc;
- if (batch->use_shadow_copy) {
- batch->batch.map = malloc(BATCH_SZ);
- batch->map_next = batch->batch.map;
- batch->state.map = malloc(STATE_SZ);
- }
-
init_reloc_list(&batch->batch_relocs, 250);
init_reloc_list(&batch->state_relocs, 250);
@@ -212,10 +206,25 @@ intel_batchbuffer_reset(struct brw_context *brw)
batch->last_bo = batch->batch.bo;
recreate_growing_buffer(brw, &batch->batch, "batchbuffer", BATCH_SZ);
- batch->map_next = batch->batch.map;
recreate_growing_buffer(brw, &batch->state, "statebuffer", STATE_SZ);
+ if (batch->use_shadow_copy) {
+ if (batch->batch.shadow_size < batch->batch.bo->size) {
+ free(batch->batch.map);
+ batch->batch.map = malloc(batch->batch.bo->size);
+ batch->batch.shadow_size = batch->batch.bo->size;
+ }
+
+ if (batch->state.shadow_size < batch->state.bo->size) {
+ free(batch->state.map);
+ batch->state.map = malloc(batch->state.bo->size);
+ batch->state.shadow_size = batch->state.bo->size;
+ }
+ }
+
+ batch->map_next = batch->batch.map;
+
/* Avoid making 0 a valid state offset - otherwise the decoder will try
* and decode data when we use offset 0 as a null pointer.
*/
@@ -361,7 +370,8 @@ grow_buffer(struct brw_context *brw,
* breaking existing pointers the caller may still be using. Just
* malloc a new copy and memcpy it like the normal BO path.
*/
- grow->map = malloc(new_size);
+ grow->map = malloc(new_bo->size);
+ grow->shadow_size = new_bo->size;
} else {
grow->map = brw_bo_map(brw, new_bo, MAP_READ | MAP_WRITE);
}
@@ -467,15 +477,17 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
}
const unsigned batch_used = USED_BATCH(*batch) * 4;
- if (batch_used + sz >= BATCH_SZ && !batch->no_wrap) {
- intel_batchbuffer_flush(brw);
- } else if (batch_used + sz >= batch->batch.bo->size) {
- const unsigned new_size =
- MIN2(batch->batch.bo->size + batch->batch.bo->size / 2,
- MAX_BATCH_SIZE);
- grow_buffer(brw, &batch->batch, batch_used, new_size);
- batch->map_next = (void *) batch->batch.map + batch_used;
- assert(batch_used + sz < batch->batch.bo->size);
+ if (batch_used + sz >= batch->batch.bo->size) {
+ if (!batch->no_wrap) {
+ intel_batchbuffer_flush(brw);
+ } else {
+ const unsigned new_size =
+ MIN2(batch->batch.bo->size + batch->batch.bo->size / 2,
+ MAX_BATCH_SIZE);
+ grow_buffer(brw, &batch->batch, batch_used, new_size);
+ batch->map_next = (void *) batch->batch.map + batch_used;
+ assert(batch_used + sz < batch->batch.bo->size);
+ }
}
/* The intel_batchbuffer_flush() calls above might have changed
@@ -1168,8 +1180,9 @@ brw_state_batch_size(struct brw_context *brw, uint32_t offset)
void
brw_require_statebuffer_space(struct brw_context *brw, int size)
{
- if (brw->batch.state_used + size >= STATE_SZ)
+ if (brw->batch.state_used + size >= brw->batch.state.bo->size) {
intel_batchbuffer_flush(brw);
+ }
}
/**
--
2.7.4