[Mesa-dev] [PATCH 15/20] radeonsi: add basic infrastructure for atom-based states

Marek Olšák maraeo at gmail.com
Tue Aug 13 18:08:18 PDT 2013


It's the same as in r600g.

I can't merge si_atom with si_pm4_state, because the latter is too big and
isn't even driven by the dirty flag. Also, I'm going to share the whole
streamout state handling with r600g (just one atom though) and therefore
need the same interface. The advantage is that almost no streamout code
will be needed in radeonsi and the old code can be removed.
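To illustrate the interface, here's a rough sketch of how a future atom
(such as the streamout one mentioned above) could plug in. All names below
(streamout_begin, si_emit_streamout_begin, the dword count) are made up for
this example and are not part of this patch:

  /* A pointer slot added inside the atoms union in r600_context:
   *
   *     struct si_atom *streamout_begin;
   *
   * The atom itself lives wherever its state lives. */
  static struct si_atom streamout_begin_atom;

  static void si_emit_streamout_begin(struct r600_context *ctx,
                                      struct si_atom *atom)
  {
          /* Write the streamout packets into ctx->cs here. */
  }

  /* At context creation: */
  streamout_begin_atom.emit = si_emit_streamout_begin;
  streamout_begin_atom.num_dw = 12; /* worst-case size in dwords */
  rctx->atoms.streamout_begin = &streamout_begin_atom;

  /* Whenever the state changes: */
  rctx->atoms.streamout_begin->dirty = true;

si_need_cs_space then budgets num_dw for every dirty atom, and si_draw_vbo
emits the dirty atoms before the pm4 states.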

We will also need to port r600_flush_emit and the associated code from r600g,
so that cache flushing takes place before state emission (I think this will
be required for streamout).
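Roughly, the intended ordering in si_draw_vbo would then be as follows.
si_flush_emit stands in for the yet-to-be-ported r600_flush_emit and does
not exist in radeonsi yet:

  si_need_cs_space(rctx, 0, TRUE);

  /* Emit cache flushes first so they land in the CS before any state. */
  si_flush_emit(rctx); /* hypothetical port of r600g's r600_flush_emit */

  /* Then emit all dirty atoms, exactly as this patch does. */
  for (i = 0; i < SI_NUM_ATOMS(rctx); i++) {
          if (rctx->atoms.array[i]->dirty) {
                  rctx->atoms.array[i]->emit(rctx, rctx->atoms.array[i]);
          }
  }

(Each emit callback is presumably responsible for clearing its own dirty
flag, since this loop doesn't.)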
---
 src/gallium/drivers/radeonsi/r600_hw_context.c |  8 ++++++++
 src/gallium/drivers/radeonsi/radeonsi_pipe.h   | 10 ++++++++++
 src/gallium/drivers/radeonsi/si_state.h        |  8 ++++++++
 src/gallium/drivers/radeonsi/si_state_draw.c   |  8 +++++++-
 4 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/src/gallium/drivers/radeonsi/r600_hw_context.c b/src/gallium/drivers/radeonsi/r600_hw_context.c
index 19e9d1c..c9a613b 100644
--- a/src/gallium/drivers/radeonsi/r600_hw_context.c
+++ b/src/gallium/drivers/radeonsi/r600_hw_context.c
@@ -114,9 +114,17 @@ err:
 void si_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 			boolean count_draw_in)
 {
+	int i;
+
 	/* The number of dwords we already used in the CS so far. */
 	num_dw += ctx->cs->cdw;
 
+	for (i = 0; i < SI_NUM_ATOMS(ctx); i++) {
+		if (ctx->atoms.array[i]->dirty) {
+			num_dw += ctx->atoms.array[i]->num_dw;
+		}
+	}
+
 	if (count_draw_in) {
 		/* The number of dwords all the dirty states would take. */
 		num_dw += ctx->pm4_dirty_cdwords;
diff --git a/src/gallium/drivers/radeonsi/radeonsi_pipe.h b/src/gallium/drivers/radeonsi/radeonsi_pipe.h
index e370149..b4a6e0c 100644
--- a/src/gallium/drivers/radeonsi/radeonsi_pipe.h
+++ b/src/gallium/drivers/radeonsi/radeonsi_pipe.h
@@ -132,6 +132,8 @@ struct r600_constbuf_state
 	uint32_t			dirty_mask;
 };
 
+#define SI_NUM_ATOMS(rctx) (sizeof((rctx)->atoms)/sizeof((rctx)->atoms.array[0]))
+
 struct r600_context {
 	struct pipe_context		context;
 	struct blitter_context		*blitter;
@@ -145,6 +147,14 @@ struct r600_context {
 	void				*custom_blend_decompress;
 	struct r600_screen		*screen;
 	struct radeon_winsys		*ws;
+
+	union {
+		struct {
+			/* Place atoms here. */
+		};
+		struct si_atom *array[0];
+	} atoms;
+
 	struct si_vertex_element	*vertex_elements;
 	struct pipe_framebuffer_state	framebuffer;
 	unsigned			fb_log_samples;
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index b01fbf2..09ef56e 100644
--- a/src/gallium/drivers/radeonsi/si_state.h
+++ b/src/gallium/drivers/radeonsi/si_state.h
@@ -29,6 +29,14 @@
 
 #include "radeonsi_pm4.h"
 
+/* This encapsulates a state or an operation which can be emitted into the GPU
+ * command stream. */
+struct si_atom {
+	void (*emit)(struct r600_context *ctx, struct si_atom *state);
+	unsigned		num_dw;
+	bool			dirty;
+};
+
 struct si_state_blend {
 	struct si_pm4_state	pm4;
 	uint32_t		cb_target_mask;
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 2007dc4..b951a39 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -665,7 +665,7 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct pipe_index_buffer ib = {};
-	uint32_t cp_coher_cntl;
+	uint32_t cp_coher_cntl, i;
 
 	if (!info->count && (info->indexed || !info->count_from_stream_output))
 		return;
@@ -729,6 +729,12 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
 
 	si_need_cs_space(rctx, 0, TRUE);
 
+	for (i = 0; i < SI_NUM_ATOMS(rctx); i++) {
+		if (rctx->atoms.array[i]->dirty) {
+			rctx->atoms.array[i]->emit(rctx, rctx->atoms.array[i]);
+		}
+	}
+
 	si_pm4_emit_dirty(rctx);
 	rctx->pm4_dirty_cdwords = 0;
 
-- 
1.8.1.2


