[Mesa-dev] [PATCH 6/6] r600g: handle DISCARD_WHOLE_RESOURCE for buffers

Marek Olšák maraeo at gmail.com
Mon Apr 2 08:10:58 PDT 2012


This should prevent stalls and therefore increase performance in some cases.
---
 src/gallium/drivers/r600/r600_buffer.c       |   59 ++++++++++++++++++++++++++
 src/gallium/drivers/r600/r600_state_common.c |    2 +-
 2 files changed, 60 insertions(+), 1 deletions(-)

diff --git a/src/gallium/drivers/r600/r600_buffer.c b/src/gallium/drivers/r600/r600_buffer.c
index b275319..f9f32b2 100644
--- a/src/gallium/drivers/r600/r600_buffer.c
+++ b/src/gallium/drivers/r600/r600_buffer.c
@@ -61,6 +61,25 @@ static struct pipe_transfer *r600_get_transfer(struct pipe_context *ctx,
 	return transfer;
 }
 
+static void r600_set_constants_dirty_if_bound(struct r600_context *rctx,
+					      struct r600_constbuf_state *state,
+					      struct r600_resource *rbuffer)
+{
+	bool found = false;
+	uint32_t mask = state->enabled_mask;
+
+	while (mask) {
+		unsigned i = u_bit_scan(&mask);
+		if (state->cb[i].buffer == &rbuffer->b.b.b) {
+			found = true;
+			state->dirty_mask |= 1 << i;
+		}
+	}
+	if (found) {
+		r600_constant_buffers_dirty(rctx, state);
+	}
+}
+
 static void *r600_buffer_transfer_map(struct pipe_context *pipe,
 				      struct pipe_transfer *transfer)
 {
@@ -68,6 +87,46 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
 	struct r600_context *rctx = (struct r600_context*)pipe;
 	uint8_t *data;
 
+	if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+		/* When mapping for read, we only need to check if the GPU is writing to it. */
+		enum radeon_bo_usage rusage = transfer->usage & PIPE_TRANSFER_WRITE ?
+			RADEON_USAGE_READWRITE : RADEON_USAGE_WRITE;
+
+		/* Check if mapping this buffer would cause waiting for the GPU. */
+		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, rusage) ||
+		    rctx->ws->buffer_is_busy(rbuffer->buf, rusage)) {
+			unsigned i;
+
+			/* Discard the buffer. */
+			pb_reference(&rbuffer->buf, NULL);
+
+			/* Create a new one in the same pipe_resource. */
+			/* XXX We probably want a different alignment for buffers and textures. */
+			r600_init_resource(rctx->screen, rbuffer, rbuffer->b.b.b.width0, 4096,
+					   rbuffer->b.b.b.bind, rbuffer->b.b.b.usage);
+
+			/* We changed the buffer, now we need to bind it where the old one was bound. */
+			/* Vertex buffers. */
+			for (i = 0; i < rctx->vbuf_mgr->nr_vertex_buffers; i++) {
+				if (rctx->vbuf_mgr->vertex_buffer[i].buffer == &rbuffer->b.b.b) {
+					r600_inval_vertex_cache(rctx);
+					r600_atom_dirty(rctx, &rctx->vertex_buffer_state);
+				}
+			}
+			/* Streamout buffers. */
+			for (i = 0; i < rctx->num_so_targets; i++) {
+				if (rctx->so_targets[i]->b.buffer == &rbuffer->b.b.b) {
+					r600_context_streamout_end(rctx);
+					rctx->streamout_start = TRUE;
+					rctx->streamout_append_bitmask = ~0;
+				}
+			}
+			/* Constant buffers. */
+			r600_set_constants_dirty_if_bound(rctx, &rctx->vs_constbuf_state, rbuffer);
+			r600_set_constants_dirty_if_bound(rctx, &rctx->ps_constbuf_state, rbuffer);
+		}
+	}
+
 	if (rbuffer->b.user_ptr)
 		return (uint8_t*)rbuffer->b.user_ptr + transfer->box.x;
 
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index 22deb47..5f048e6 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -521,6 +521,7 @@ static void r600_update_alpha_ref(struct r600_context *rctx)
 
 void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
 {
+	r600_inval_shader_cache(rctx);
 	state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
 							   : util_bitcount(state->dirty_mask)*19;
 	r600_atom_dirty(rctx, &state->atom);
@@ -556,7 +557,6 @@ void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
 		return;
 	}
 
-	r600_inval_shader_cache(rctx);
 	r600_upload_const_buffer(rctx, &rbuffer, &offset);
 
 	cb = &state->cb[index];
-- 
1.7.5.4



More information about the mesa-dev mailing list