[Mesa-dev] [PATCH 06/14] radeonsi/gfx9: implement the scissor bug workaround without performance drop
Marek Olšák
maraeo at gmail.com
Fri Jun 1 05:21:14 UTC 2018
From: Marek Olšák <marek.olsak at amd.com>
This might improve performance on Vega10 and Raven.
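For readers skimming the patch, here is a minimal standalone sketch (not the driver code; all names below are illustrative) of the offsetof-based bit-index trick that SI_STATE_IDX/SI_STATE_BIT introduce: each pm4 state slot in the union maps to one bit, so "does any dirty state write context registers" becomes a single mask test against dirty_states.

/* Sketch only: shows how offsetof turns a union member into a bit
 * index, so a set of states can be tested with one mask. */
#include <stddef.h>
#include <stdio.h>

struct pm4_state { int dummy; };

union demo_state {
	struct {
		struct pm4_state *blend;
		struct pm4_state *rasterizer;
		struct pm4_state *dsa;
		struct pm4_state *vs;
		struct pm4_state *ps;
	} named;
	struct pm4_state *array[5];
};

#define DEMO_STATE_IDX(name) \
	(offsetof(union demo_state, named.name) / sizeof(struct pm4_state *))
#define DEMO_STATE_BIT(name) (1u << DEMO_STATE_IDX(name))

int main(void)
{
	/* Pretend only the rasterizer state was rebound for this draw. */
	unsigned dirty_states = DEMO_STATE_BIT(rasterizer);

	/* States whose emission writes context registers (rolls the context). */
	unsigned roll_mask = DEMO_STATE_BIT(blend) |
			     DEMO_STATE_BIT(rasterizer) |
			     DEMO_STATE_BIT(dsa);

	if (dirty_states & roll_mask)
		printf("context roll pending -> re-emit scissors\n");
	return 0;
}

The patch applies the same idea to atoms via SI_ATOM_BIT; that is what lets the Vega10/Raven workaround re-emit the scissor registers only when a context roll is actually pending, instead of doing a PS partial flush whenever the scissors are dirty.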
---
src/gallium/drivers/radeonsi/si_state.h | 46 ++++++++++++--
src/gallium/drivers/radeonsi/si_state_draw.c | 64 ++++++++++++--------
2 files changed, 81 insertions(+), 29 deletions(-)
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index d235f31c792..9da58ac9710 100644
--- a/src/gallium/drivers/radeonsi/si_state.h
+++ b/src/gallium/drivers/radeonsi/si_state.h
@@ -165,22 +165,38 @@ union si_state {
struct si_pm4_state *hs;
struct si_pm4_state *es;
struct si_pm4_state *gs;
struct si_pm4_state *vgt_shader_config;
struct si_pm4_state *vs;
struct si_pm4_state *ps;
} named;
struct si_pm4_state *array[0];
};
+#define SI_STATE_IDX(name) \
+ (offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
+#define SI_STATE_BIT(name) (1 << SI_STATE_IDX(name))
#define SI_NUM_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))
+static inline unsigned si_states_that_roll_context(void)
+{
+ return (SI_STATE_BIT(blend) |
+ SI_STATE_BIT(rasterizer) |
+ SI_STATE_BIT(dsa) |
+ SI_STATE_BIT(poly_offset) |
+ SI_STATE_BIT(es) |
+ SI_STATE_BIT(gs) |
+ SI_STATE_BIT(vgt_shader_config) |
+ SI_STATE_BIT(vs) |
+ SI_STATE_BIT(ps));
+}
+
union si_state_atoms {
struct {
/* The order matters. */
struct si_atom render_cond;
struct si_atom streamout_begin;
struct si_atom streamout_enable; /* must be after streamout_begin */
struct si_atom framebuffer;
struct si_atom msaa_sample_locs;
struct si_atom db_render_state;
struct si_atom dpbb_state;
@@ -193,22 +209,45 @@ union si_state_atoms {
struct si_atom shader_pointers;
struct si_atom scissors;
struct si_atom viewports;
struct si_atom stencil_ref;
struct si_atom spi_map;
struct si_atom scratch_state;
} s;
struct si_atom array[0];
};
+#define SI_ATOM_BIT(name) (1 << (offsetof(union si_state_atoms, s.name) / \
+ sizeof(struct si_atom)))
#define SI_NUM_ATOMS (sizeof(union si_state_atoms)/sizeof(struct si_atom*))
+static inline unsigned si_atoms_that_roll_context(void)
+{
+ return (SI_ATOM_BIT(streamout_begin) |
+ SI_ATOM_BIT(streamout_enable) |
+ SI_ATOM_BIT(framebuffer) |
+ SI_ATOM_BIT(msaa_sample_locs) |
+ SI_ATOM_BIT(db_render_state) |
+ SI_ATOM_BIT(dpbb_state) |
+ SI_ATOM_BIT(msaa_config) |
+ SI_ATOM_BIT(sample_mask) |
+ SI_ATOM_BIT(cb_render_state) |
+ SI_ATOM_BIT(blend_color) |
+ SI_ATOM_BIT(clip_regs) |
+ SI_ATOM_BIT(clip_state) |
+ SI_ATOM_BIT(scissors) |
+ SI_ATOM_BIT(viewports) |
+ SI_ATOM_BIT(stencil_ref) |
+ SI_ATOM_BIT(spi_map) |
+ SI_ATOM_BIT(scratch_state));
+}
+
struct si_shader_data {
uint32_t sh_base[SI_NUM_SHADERS];
};
/* Private read-write buffer slots. */
enum {
SI_ES_RING_ESGS,
SI_GS_RING_ESGS,
SI_RING_GSVS,
@@ -299,42 +338,39 @@ struct si_buffer_resources {
enum radeon_bo_usage shader_usage:4; /* READ, WRITE, or READWRITE */
enum radeon_bo_usage shader_usage_constbuf:4;
enum radeon_bo_priority priority:6;
enum radeon_bo_priority priority_constbuf:6;
/* The i-th bit is set if that element is enabled (non-NULL resource). */
unsigned enabled_mask;
};
-#define si_pm4_block_idx(member) \
- (offsetof(union si_state, named.member) / sizeof(struct si_pm4_state *))
-
#define si_pm4_state_changed(sctx, member) \
((sctx)->queued.named.member != (sctx)->emitted.named.member)
#define si_pm4_state_enabled_and_changed(sctx, member) \
((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))
#define si_pm4_bind_state(sctx, member, value) \
do { \
(sctx)->queued.named.member = (value); \
- (sctx)->dirty_states |= 1 << si_pm4_block_idx(member); \
+ (sctx)->dirty_states |= SI_STATE_BIT(member); \
} while(0)
#define si_pm4_delete_state(sctx, member, value) \
do { \
if ((sctx)->queued.named.member == (value)) { \
(sctx)->queued.named.member = NULL; \
} \
si_pm4_free_state(sctx, (struct si_pm4_state *)(value), \
- si_pm4_block_idx(member)); \
+ SI_STATE_IDX(member)); \
} while(0)
/* si_descriptors.c */
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen,
struct r600_texture *tex,
const struct legacy_surf_level *base_level_info,
unsigned base_level, unsigned first_level,
unsigned block_width, bool is_stencil,
uint32_t *state);
void si_update_ps_colorbuf0_slot(struct si_context *sctx);
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 42522c01291..5588c9a2c53 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -59,21 +59,21 @@ static unsigned si_conv_pipe_prim(unsigned mode)
return prim_conv[mode];
}
/**
* This calculates the LDS size for tessellation shaders (VS, TCS, TES).
* LS.LDS_SIZE is shared by all 3 shader stages.
*
* The information about LDS and other non-compile-time parameters is then
* written to userdata SGPRs.
*/
-static void si_emit_derived_tess_state(struct si_context *sctx,
+static bool si_emit_derived_tess_state(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned *num_patches)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
struct si_shader *ls_current;
struct si_shader_selector *ls;
/* The TES pointer will only be used for sctx->last_tcs.
* It would be wrong to think that TCS = TES. */
struct si_shader_selector *tcs =
sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
@@ -103,21 +103,21 @@ static void si_emit_derived_tess_state(struct si_context *sctx,
ls = sctx->vs_shader.cso;
}
if (sctx->last_ls == ls_current &&
sctx->last_tcs == tcs &&
sctx->last_tes_sh_base == tes_sh_base &&
sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
(!has_primid_instancing_bug ||
(sctx->last_tess_uses_primid == tess_uses_primid))) {
*num_patches = sctx->last_num_patches;
- return;
+ return false;
}
sctx->last_ls = ls_current;
sctx->last_tcs = tcs;
sctx->last_tes_sh_base = tes_sh_base;
sctx->last_num_tcs_input_cp = num_tcs_input_cp;
sctx->last_tess_uses_primid = tess_uses_primid;
/* This calculates how shader inputs and outputs among VS, TCS, and TES
* are laid out in LDS. */
@@ -282,21 +282,23 @@ static void si_emit_derived_tess_state(struct si_context *sctx,
if (sctx->last_ls_hs_config != ls_hs_config) {
if (sctx->chip_class >= CIK) {
radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
ls_hs_config);
} else {
radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
ls_hs_config);
}
sctx->last_ls_hs_config = ls_hs_config;
+ return true; /* true if the context rolls */
}
+ return false;
}
static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
switch (info->mode) {
case PIPE_PRIM_PATCHES:
return info->count / info->vertices_per_patch;
case SI_PRIM_RECTANGLE_LIST:
return info->count / 3;
default:
@@ -505,47 +507,48 @@ static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
(info->instance_count > 1 &&
(info->count_from_stream_output ||
si_num_prims_for_vertices(info) <= 1))))
sctx->flags |= SI_CONTEXT_VGT_FLUSH;
}
return ia_multi_vgt_param;
}
/* rast_prim is the primitive type after GS. */
-static void si_emit_rasterizer_prim_state(struct si_context *sctx)
+static bool si_emit_rasterizer_prim_state(struct si_context *sctx)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
enum pipe_prim_type rast_prim = sctx->current_rast_prim;
- struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;
+ struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
/* Skip this if not rendering lines. */
if (rast_prim != PIPE_PRIM_LINES &&
rast_prim != PIPE_PRIM_LINE_LOOP &&
rast_prim != PIPE_PRIM_LINE_STRIP &&
rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
- return;
+ return false;
if (rast_prim == sctx->last_rast_prim &&
rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
- return;
+ return false;
/* For lines, reset the stipple pattern at each primitive. Otherwise,
* reset the stipple pattern at each packet (line strips, line loops).
*/
radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
rs->pa_sc_line_stipple |
S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
sctx->last_rast_prim = rast_prim;
sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
+ return true; /* true if the context rolls */
}
static void si_emit_vs_state(struct si_context *sctx,
const struct pipe_draw_info *info)
{
sctx->current_vs_state &= C_VS_STATE_INDEXED;
sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
if (sctx->num_vs_blit_sgprs) {
/* Re-emit the state after we leave u_blitter. */
@@ -558,20 +561,28 @@ static void si_emit_vs_state(struct si_context *sctx,
radeon_set_sh_reg(cs,
sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
SI_SGPR_VS_STATE_BITS * 4,
sctx->current_vs_state);
sctx->last_vs_state = sctx->current_vs_state;
}
}
+static inline bool si_prim_restart_index_changed(struct si_context *sctx,
+ const struct pipe_draw_info *info)
+{
+ return info->primitive_restart &&
+ (info->restart_index != sctx->last_restart_index ||
+ sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
+}
+
static void si_emit_draw_registers(struct si_context *sctx,
const struct pipe_draw_info *info,
unsigned num_patches)
{
struct radeon_winsys_cs *cs = sctx->gfx_cs;
unsigned prim = si_conv_pipe_prim(info->mode);
unsigned ia_multi_vgt_param;
ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
@@ -600,23 +611,21 @@ static void si_emit_draw_registers(struct si_context *sctx,
if (sctx->chip_class >= GFX9)
radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
info->primitive_restart);
else
radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
info->primitive_restart);
sctx->last_primitive_restart_en = info->primitive_restart;
}
- if (info->primitive_restart &&
- (info->restart_index != sctx->last_restart_index ||
- sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
+ if (si_prim_restart_index_changed(sctx, info)) {
radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
info->restart_index);
sctx->last_restart_index = info->restart_index;
}
}
static void si_emit_draw_packets(struct si_context *sctx,
const struct pipe_draw_info *info,
struct pipe_resource *indexbuf,
unsigned index_size,
@@ -1145,20 +1154,42 @@ static void si_get_draw_start_count(struct si_context *sctx,
}
} else {
*start = info->start;
*count = info->count;
}
}
static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
unsigned skip_atom_mask)
{
+ unsigned num_patches = 0;
+ bool context_roll = false; /* set correctly for GFX9 only */
+
+ context_roll |= si_emit_rasterizer_prim_state(sctx);
+ if (sctx->tes_shader.cso)
+ context_roll |= si_emit_derived_tess_state(sctx, info, &num_patches);
+ if (info->count_from_stream_output)
+ context_roll = true;
+
+ /* Vega10/Raven scissor bug workaround. When any context register is
+ * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
+ * registers must be written too.
+ */
+ if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
+ (context_roll ||
+ sctx->dirty_atoms & si_atoms_that_roll_context() ||
+ sctx->dirty_states & si_states_that_roll_context() ||
+ si_prim_restart_index_changed(sctx, info))) {
+ sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
+ si_mark_atom_dirty(sctx, &sctx->atoms.s.scissors);
+ }
+
/* Emit state atoms. */
unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
while (mask)
sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
sctx->dirty_atoms &= skip_atom_mask;
/* Emit states. */
mask = sctx->dirty_states;
while (mask) {
@@ -1167,25 +1198,20 @@ static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_i
if (!state || sctx->emitted.array[i] == state)
continue;
si_pm4_emit(sctx, state);
sctx->emitted.array[i] = state;
}
sctx->dirty_states = 0;
/* Emit draw states. */
- unsigned num_patches = 0;
-
- si_emit_rasterizer_prim_state(sctx);
- if (sctx->tes_shader.cso)
- si_emit_derived_tess_state(sctx, info, &num_patches);
si_emit_vs_state(sctx, info);
si_emit_draw_registers(sctx, info, num_patches);
}
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct pipe_resource *indexbuf = info->index.resource;
unsigned dirty_tex_counter;
@@ -1377,30 +1403,20 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
si_need_gfx_cs_space(sctx);
/* Since we've called si_context_add_resource_size for vertex buffers,
* this must be called after si_need_cs_space, because we must let
* need_cs_space flush before we add buffers to the buffer list.
*/
if (!si_upload_vertex_buffer_descriptors(sctx))
return;
- /* Vega10/Raven scissor bug workaround. This must be done before VPORT
- * scissor registers are changed. There is also a more efficient but
- * more involved alternative workaround.
- */
- if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
- si_is_atom_dirty(sctx, &sctx->atoms.s.scissors)) {
- sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
- si_emit_cache_flush(sctx);
- }
-
/* Use optimal packet order based on whether we need to sync the pipeline. */
if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
SI_CONTEXT_FLUSH_AND_INV_DB |
SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH))) {
/* If we have to wait for idle, set all states first, so that all
* SET packets are processed in parallel with previous draw calls.
* Then draw and prefetch at the end. This ensures that the time
* the CUs are idle is very short.
*/
--
2.17.0