[Mesa-dev] [PATCH 1/7] radeonsi: import r600_streamout from drivers/radeon
Marek Olšák
maraeo at gmail.com
Sat Oct 7 22:47:00 UTC 2017
From: Marek Olšák <marek.olsak at amd.com>
---
src/gallium/drivers/radeon/Makefile.sources | 1 -
src/gallium/drivers/radeon/r600_pipe_common.c | 12 --
src/gallium/drivers/radeon/r600_pipe_common.h | 57 -------
src/gallium/drivers/radeon/r600_query.c | 4 +
src/gallium/drivers/radeonsi/Makefile.sources | 1 +
src/gallium/drivers/radeonsi/si_blit.c | 4 +-
src/gallium/drivers/radeonsi/si_descriptors.c | 20 +--
src/gallium/drivers/radeonsi/si_hw_context.c | 13 +-
src/gallium/drivers/radeonsi/si_pipe.c | 1 +
src/gallium/drivers/radeonsi/si_pipe.h | 44 ++++++
src/gallium/drivers/radeonsi/si_state.c | 4 +-
src/gallium/drivers/radeonsi/si_state.h | 11 ++
src/gallium/drivers/radeonsi/si_state_draw.c | 6 +-
src/gallium/drivers/radeonsi/si_state_shaders.c | 4 +-
.../si_state_streamout.c} | 176 +++++++++++----------
15 files changed, 181 insertions(+), 177 deletions(-)
rename src/gallium/drivers/{radeon/r600_streamout.c => radeonsi/si_state_streamout.c} (58%)
diff --git a/src/gallium/drivers/radeon/Makefile.sources b/src/gallium/drivers/radeon/Makefile.sources
index 5d38bb3..c32ebea 100644
--- a/src/gallium/drivers/radeon/Makefile.sources
+++ b/src/gallium/drivers/radeon/Makefile.sources
@@ -1,21 +1,20 @@
C_SOURCES := \
cayman_msaa.c \
r600_buffer_common.c \
r600_cs.h \
r600_gpu_load.c \
r600_perfcounter.c \
r600_pipe_common.c \
r600_pipe_common.h \
r600_query.c \
r600_query.h \
- r600_streamout.c \
r600_test_dma.c \
r600_texture.c \
radeon_uvd.c \
radeon_uvd.h \
radeon_vcn_dec.c \
radeon_vcn_dec.h \
radeon_vce_40_2_2.c \
radeon_vce_50.c \
radeon_vce_52.c \
radeon_vce.c \
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index da61580..a6008a1 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -289,35 +289,24 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
static void r600_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
}
void si_preflush_suspend_features(struct r600_common_context *ctx)
{
/* suspend queries */
if (!LIST_IS_EMPTY(&ctx->active_queries))
si_suspend_queries(ctx);
-
- ctx->streamout.suspended = false;
- if (ctx->streamout.begin_emitted) {
- si_emit_streamout_end(ctx);
- ctx->streamout.suspended = true;
- }
}
void si_postflush_resume_features(struct r600_common_context *ctx)
{
- if (ctx->streamout.suspended) {
- ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
- si_streamout_buffers_dirty(ctx);
- }
-
/* resume queries */
if (!LIST_IS_EMPTY(&ctx->active_queries))
si_resume_queries(ctx);
}
static void r600_add_fence_dependency(struct r600_common_context *rctx,
struct pipe_fence_handle *fence)
{
struct radeon_winsys *ws = rctx->ws;
@@ -640,21 +629,20 @@ bool si_common_context_init(struct r600_common_context *rctx,
if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
rctx->b.get_device_reset_status = r600_get_reset_status;
rctx->gpu_reset_counter =
rctx->ws->query_value(rctx->ws,
RADEON_GPU_RESET_COUNTER);
}
rctx->b.set_device_reset_callback = r600_set_device_reset_callback;
si_init_context_texture_functions(rctx);
- si_streamout_init(rctx);
si_init_query_functions(rctx);
si_init_msaa(&rctx->b);
if (rctx->chip_class == CIK ||
rctx->chip_class == VI ||
rctx->chip_class == GFX9) {
rctx->eop_bug_scratch = (struct r600_resource*)
pipe_buffer_create(&rscreen->b, 0, PIPE_USAGE_DEFAULT,
16 * rscreen->info.num_render_backends);
if (!rctx->eop_bug_scratch)
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
index e3cb1cf..b620e6b 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.h
+++ b/src/gallium/drivers/radeon/r600_pipe_common.h
@@ -490,57 +490,20 @@ struct r600_common_screen {
struct radeon_bo_metadata *md);
};
/* This encapsulates a state or an operation which can emitted into the GPU
* command stream. */
struct r600_atom {
void (*emit)(struct r600_common_context *ctx, struct r600_atom *state);
unsigned short id;
};
-struct r600_so_target {
- struct pipe_stream_output_target b;
-
- /* The buffer where BUFFER_FILLED_SIZE is stored. */
- struct r600_resource *buf_filled_size;
- unsigned buf_filled_size_offset;
- bool buf_filled_size_valid;
-
- unsigned stride_in_dw;
-};
-
-struct r600_streamout {
- struct r600_atom begin_atom;
- bool begin_emitted;
-
- unsigned enabled_mask;
- unsigned num_targets;
- struct r600_so_target *targets[PIPE_MAX_SO_BUFFERS];
-
- unsigned append_bitmask;
- bool suspended;
-
- /* External state which comes from the vertex shader,
- * it must be set explicitly when binding a shader. */
- uint16_t *stride_in_dw;
- unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
-
- /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
- unsigned hw_enabled_mask;
-
- /* The state of VGT_STRMOUT_(CONFIG|EN). */
- struct r600_atom enable_atom;
- bool streamout_enabled;
- bool prims_gen_query_enabled;
- int num_prims_gen_queries;
-};
-
struct r600_ring {
struct radeon_winsys_cs *cs;
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence);
};
/* Saved CS data for debugging features. */
struct radeon_saved_cs {
uint32_t *ib;
unsigned num_dw;
@@ -571,23 +534,20 @@ struct r600_common_context {
struct threaded_context *tc;
struct u_suballocator *allocator_zeroed_memory;
struct slab_child_pool pool_transfers;
struct slab_child_pool pool_transfers_unsync; /* for threaded_context */
/* Current unaccounted memory usage. */
uint64_t vram;
uint64_t gtt;
- /* States. */
- struct r600_streamout streamout;
-
/* Additional context states. */
unsigned flags; /* flush flags */
/* Queries. */
/* Maintain the list of active queries for pausing between IBs. */
int num_occlusion_queries;
int num_perfect_occlusion_queries;
struct list_head active_queries;
unsigned num_cs_dw_queries_suspend;
/* Misc stats. */
@@ -783,31 +743,20 @@ unsigned si_end_counter(struct r600_common_screen *rscreen, unsigned type,
/* r600_perfcounters.c */
void si_perfcounters_destroy(struct r600_common_screen *rscreen);
/* r600_query.c */
void si_init_screen_query_functions(struct r600_common_screen *rscreen);
void si_init_query_functions(struct r600_common_context *rctx);
void si_suspend_queries(struct r600_common_context *ctx);
void si_resume_queries(struct r600_common_context *ctx);
-/* r600_streamout.c */
-void si_streamout_buffers_dirty(struct r600_common_context *rctx);
-void si_common_set_streamout_targets(struct pipe_context *ctx,
- unsigned num_targets,
- struct pipe_stream_output_target **targets,
- const unsigned *offset);
-void si_emit_streamout_end(struct r600_common_context *rctx);
-void si_update_prims_generated_query_state(struct r600_common_context *rctx,
- unsigned type, int diff);
-void si_streamout_init(struct r600_common_context *rctx);
-
/* r600_test_dma.c */
void si_test_dma(struct r600_common_screen *rscreen);
/* r600_texture.c */
bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
struct r600_texture *rdst,
unsigned dst_level, unsigned dstx,
unsigned dsty, unsigned dstz,
struct r600_texture *rsrc,
unsigned src_level,
@@ -893,26 +842,20 @@ r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct r600_resource *res = (struct r600_resource *)r;
if (res) {
/* Add memory usage for need_gfx_cs_space */
rctx->vram += res->vram_usage;
rctx->gtt += res->gart_usage;
}
}
-static inline bool r600_get_strmout_en(struct r600_common_context *rctx)
-{
- return rctx->streamout.streamout_enabled ||
- rctx->streamout.prims_gen_query_enabled;
-}
-
#define SQ_TEX_XY_FILTER_POINT 0x00
#define SQ_TEX_XY_FILTER_BILINEAR 0x01
#define SQ_TEX_XY_FILTER_ANISO_POINT 0x02
#define SQ_TEX_XY_FILTER_ANISO_BILINEAR 0x03
static inline unsigned eg_tex_filter(unsigned filter, unsigned max_aniso)
{
if (filter == PIPE_TEX_FILTER_LINEAR)
return max_aniso > 1 ? SQ_TEX_XY_FILTER_ANISO_BILINEAR
: SQ_TEX_XY_FILTER_BILINEAR;
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index adf3522..3abfe1e 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -22,20 +22,24 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"
+/* TODO: remove this: */
+void si_update_prims_generated_query_state(struct r600_common_context *rctx,
+ unsigned type, int diff);
+
#define R600_MAX_STREAMS 4
struct r600_hw_query_params {
unsigned start_offset;
unsigned end_offset;
unsigned fence_offset;
unsigned pair_stride;
unsigned pair_count;
};
diff --git a/src/gallium/drivers/radeonsi/Makefile.sources b/src/gallium/drivers/radeonsi/Makefile.sources
index ed3e520..63cd7a3 100644
--- a/src/gallium/drivers/radeonsi/Makefile.sources
+++ b/src/gallium/drivers/radeonsi/Makefile.sources
@@ -23,13 +23,14 @@ C_SOURCES := \
si_shader.h \
si_shader_internal.h \
si_shader_nir.c \
si_shader_tgsi_alu.c \
si_shader_tgsi_mem.c \
si_shader_tgsi_setup.c \
si_state.c \
si_state_binning.c \
si_state_draw.c \
si_state_shaders.c \
+ si_state_streamout.c \
si_state_viewport.c \
si_state.h \
si_uvd.c
diff --git a/src/gallium/drivers/radeonsi/si_blit.c b/src/gallium/drivers/radeonsi/si_blit.c
index 4806e7c..03aa4f7 100644
--- a/src/gallium/drivers/radeonsi/si_blit.c
+++ b/src/gallium/drivers/radeonsi/si_blit.c
@@ -51,22 +51,22 @@ enum si_blitter_op /* bitmask */
};
static void si_blitter_begin(struct pipe_context *ctx, enum si_blitter_op op)
{
struct si_context *sctx = (struct si_context *)ctx;
util_blitter_save_vertex_shader(sctx->blitter, sctx->vs_shader.cso);
util_blitter_save_tessctrl_shader(sctx->blitter, sctx->tcs_shader.cso);
util_blitter_save_tesseval_shader(sctx->blitter, sctx->tes_shader.cso);
util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader.cso);
- util_blitter_save_so_targets(sctx->blitter, sctx->b.streamout.num_targets,
- (struct pipe_stream_output_target**)sctx->b.streamout.targets);
+ util_blitter_save_so_targets(sctx->blitter, sctx->streamout.num_targets,
+ (struct pipe_stream_output_target**)sctx->streamout.targets);
util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
if (op & SI_SAVE_FRAGMENT_STATE) {
util_blitter_save_blend(sctx->blitter, sctx->queued.named.blend);
util_blitter_save_depth_stencil_alpha(sctx->blitter, sctx->queued.named.dsa);
util_blitter_save_stencil_ref(sctx->blitter, &sctx->stencil_ref.state);
util_blitter_save_fragment_shader(sctx->blitter, sctx->ps_shader.cso);
util_blitter_save_sample_mask(sctx->blitter, sctx->sample_mask.sample_mask);
util_blitter_save_scissor(sctx->blitter, &sctx->scissors.states[0]);
}
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index dee8e71..dd1f1e9 100644
--- a/src/gallium/drivers/radeonsi/si_descriptors.c
+++ b/src/gallium/drivers/radeonsi/si_descriptors.c
@@ -1366,37 +1366,37 @@ void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
/* STREAMOUT BUFFERS */
static void si_set_streamout_targets(struct pipe_context *ctx,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offsets)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_buffer_resources *buffers = &sctx->rw_buffers;
struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
- unsigned old_num_targets = sctx->b.streamout.num_targets;
+ unsigned old_num_targets = sctx->streamout.num_targets;
unsigned i, bufidx;
/* We are going to unbind the buffers. Mark which caches need to be flushed. */
- if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
+ if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
/* Since streamout uses vector writes which go through TC L2
* and most other clients can use TC L2 as well, we don't need
* to flush it.
*
* The only cases which requires flushing it is VGT DMA index
* fetching (on <= CIK) and indirect draw data, which are rare
* cases. Thus, flag the TC L2 dirtiness in the resource and
* handle it at draw call time.
*/
- for (i = 0; i < sctx->b.streamout.num_targets; i++)
- if (sctx->b.streamout.targets[i])
- r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
+ for (i = 0; i < sctx->streamout.num_targets; i++)
+ if (sctx->streamout.targets[i])
+ r600_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
/* Invalidate the scalar cache in case a streamout buffer is
* going to be used as a constant buffer.
*
* Invalidate TC L1, because streamout bypasses it (done by
* setting GLC=1 in the store instruction), but it can contain
* outdated data of streamout buffers.
*
* VS_PARTIAL_FLUSH is required if the buffers are going to be
* used as an input immediately.
@@ -1643,25 +1643,25 @@ static void si_rebind_buffer(struct pipe_context *ctx, struct pipe_resource *buf
si_desc_reset_buffer_offset(ctx, descs->list + i*4,
old_va, buf);
sctx->descriptors_dirty |= 1u << SI_DESCS_RW_BUFFERS;
radeon_add_to_buffer_list_check_mem(&sctx->b, &sctx->b.gfx,
rbuffer, buffers->shader_usage,
RADEON_PRIO_SHADER_RW_BUFFER,
true);
/* Update the streamout state. */
- if (sctx->b.streamout.begin_emitted)
- si_emit_streamout_end(&sctx->b);
- sctx->b.streamout.append_bitmask =
- sctx->b.streamout.enabled_mask;
- si_streamout_buffers_dirty(&sctx->b);
+ if (sctx->streamout.begin_emitted)
+ si_emit_streamout_end(sctx);
+ sctx->streamout.append_bitmask =
+ sctx->streamout.enabled_mask;
+ si_streamout_buffers_dirty(sctx);
}
}
/* Constant and shader buffers. */
if (rbuffer->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
for (shader = 0; shader < SI_NUM_SHADERS; shader++)
si_reset_buffer_resources(sctx, &sctx->const_and_shader_buffers[shader],
si_const_and_shader_buffer_descriptors_idx(shader),
u_bit_consecutive(SI_NUM_SHADER_BUFFERS, SI_NUM_CONST_BUFFERS),
buf, old_va,
diff --git a/src/gallium/drivers/radeonsi/si_hw_context.c b/src/gallium/drivers/radeonsi/si_hw_context.c
index 72da54e..317b50c 100644
--- a/src/gallium/drivers/radeonsi/si_hw_context.c
+++ b/src/gallium/drivers/radeonsi/si_hw_context.c
@@ -93,20 +93,26 @@ void si_context_gfx_flush(void *context, unsigned flags,
*/
if (radeon_emitted(ctx->b.dma.cs, 0)) {
assert(fence == NULL); /* internal flushes only */
ctx->b.dma.flush(ctx, flags, NULL);
}
ctx->gfx_flush_in_progress = true;
si_preflush_suspend_features(&ctx->b);
+ ctx->streamout.suspended = false;
+ if (ctx->streamout.begin_emitted) {
+ si_emit_streamout_end(ctx);
+ ctx->streamout.suspended = true;
+ }
+
ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
SI_CONTEXT_PS_PARTIAL_FLUSH;
/* DRM 3.1.0 doesn't flush TC for VI correctly. */
if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
SI_CONTEXT_INV_VMEM_L1;
si_emit_cache_flush(ctx);
@@ -236,37 +242,42 @@ void si_begin_new_cs(struct si_context *ctx)
si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
si_mark_atom_dirty(ctx, &ctx->cb_render_state);
/* CLEAR_STATE sets zeros. */
if (!has_clear_state || ctx->blend_color.any_nonzeros)
si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
si_mark_atom_dirty(ctx, &ctx->db_render_state);
if (ctx->b.chip_class >= GFX9)
si_mark_atom_dirty(ctx, &ctx->dpbb_state);
si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
si_mark_atom_dirty(ctx, &ctx->spi_map);
- si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
+ si_mark_atom_dirty(ctx, &ctx->streamout.enable_atom);
si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
si_all_descriptors_begin_new_cs(ctx);
si_all_resident_buffers_begin_new_cs(ctx);
ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
ctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
si_mark_atom_dirty(ctx, &ctx->scissors.atom);
si_mark_atom_dirty(ctx, &ctx->viewports.atom);
si_mark_atom_dirty(ctx, &ctx->scratch_state);
if (ctx->scratch_buffer) {
r600_context_add_resource_size(&ctx->b.b,
&ctx->scratch_buffer->b.b);
}
+ if (ctx->streamout.suspended) {
+ ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
+ si_streamout_buffers_dirty(ctx);
+ }
+
si_postflush_resume_features(&ctx->b);
assert(!ctx->b.gfx.cs->prev_dw);
ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
/* Invalidate various draw states so that they are emitted before
* the first draw call. */
si_invalidate_draw_sh_constants(ctx);
ctx->last_index_size = -1;
ctx->last_primitive_restart_en = -1;
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index d0b90e7..b9840ad 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.c
+++ b/src/gallium/drivers/radeonsi/si_pipe.c
@@ -198,20 +198,21 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
if (!si_common_context_init(&sctx->b, &sscreen->b, flags))
goto fail;
if (sscreen->b.info.drm_major == 3)
sctx->b.b.get_device_reset_status = si_amdgpu_get_reset_status;
si_init_blit_functions(sctx);
si_init_compute_functions(sctx);
si_init_cp_dma_functions(sctx);
si_init_debug_functions(sctx);
+ si_init_streamout_functions(sctx);
if (sscreen->b.info.has_hw_decode) {
sctx->b.b.create_video_codec = si_uvd_create_decoder;
sctx->b.b.create_video_buffer = si_video_buffer_create;
} else {
sctx->b.b.create_video_codec = vl_create_decoder;
sctx->b.b.create_video_buffer = vl_video_buffer_create;
}
sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index cf36100..4e54b7e 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -248,20 +248,57 @@ struct si_clip_state {
struct si_sample_locs {
struct r600_atom atom;
unsigned nr_samples;
};
struct si_sample_mask {
struct r600_atom atom;
uint16_t sample_mask;
};
+struct si_streamout_target {
+ struct pipe_stream_output_target b;
+
+ /* The buffer where BUFFER_FILLED_SIZE is stored. */
+ struct r600_resource *buf_filled_size;
+ unsigned buf_filled_size_offset;
+ bool buf_filled_size_valid;
+
+ unsigned stride_in_dw;
+};
+
+struct si_streamout {
+ struct r600_atom begin_atom;
+ bool begin_emitted;
+
+ unsigned enabled_mask;
+ unsigned num_targets;
+ struct si_streamout_target *targets[PIPE_MAX_SO_BUFFERS];
+
+ unsigned append_bitmask;
+ bool suspended;
+
+ /* External state which comes from the vertex shader,
+ * it must be set explicitly when binding a shader. */
+ uint16_t *stride_in_dw;
+ unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
+
+ /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
+ unsigned hw_enabled_mask;
+
+ /* The state of VGT_STRMOUT_(CONFIG|EN). */
+ struct r600_atom enable_atom;
+ bool streamout_enabled;
+ bool prims_gen_query_enabled;
+ int num_prims_gen_queries;
+};
+
/* A shader state consists of the shader selector, which is a constant state
* object shared by multiple contexts and shouldn't be modified, and
* the current shader variant selected for this context.
*/
struct si_shader_ctx_state {
struct si_shader_selector *cso;
struct si_shader *current;
};
#define SI_NUM_VGT_PARAM_KEY_BITS 12
@@ -352,20 +389,21 @@ struct si_context {
struct si_sample_mask sample_mask;
struct r600_atom cb_render_state;
unsigned last_cb_target_mask;
struct si_blend_color blend_color;
struct r600_atom clip_regs;
struct si_clip_state clip_state;
struct si_shader_data shader_pointers;
struct si_stencil_ref stencil_ref;
struct r600_atom spi_map;
struct si_scissors scissors;
+ struct si_streamout streamout;
struct si_viewports viewports;
/* Precomputed states. */
struct si_pm4_state *init_config;
struct si_pm4_state *init_config_gs_rings;
bool init_config_has_vgt_flush;
struct si_pm4_state *vgt_shader_config[4];
/* shaders */
struct si_shader_ctx_state ps_shader;
@@ -637,20 +675,26 @@ static inline struct tgsi_shader_info *si_get_vs_info(struct si_context *sctx)
static inline struct si_shader* si_get_vs_state(struct si_context *sctx)
{
if (sctx->gs_shader.cso)
return sctx->gs_shader.cso->gs_copy_shader;
struct si_shader_ctx_state *vs = si_get_vs(sctx);
return vs->current ? vs->current : NULL;
}
+static inline bool si_get_strmout_en(struct si_context *sctx)
+{
+ return sctx->streamout.streamout_enabled ||
+ sctx->streamout.prims_gen_query_enabled;
+}
+
static inline unsigned
si_optimal_tcc_alignment(struct si_context *sctx, unsigned upload_size)
{
unsigned alignment, tcc_cache_line_size;
/* If the upload size is less than the cache line size (e.g. 16, 32),
* the whole thing will fit into a cache line if we align it to its size.
* The idea is that multiple small uploads can share a cache line.
* If the upload size is greater, align it to the cache line size.
*/
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 99c3ca3..82f3962 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -4400,22 +4400,22 @@ static void si_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw,
bool include_draw_vbo)
{
si_need_cs_space((struct si_context*)ctx);
}
static void si_init_config(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx)
{
si_init_external_atom(sctx, &sctx->b.render_cond_atom, &sctx->atoms.s.render_cond);
- si_init_external_atom(sctx, &sctx->b.streamout.begin_atom, &sctx->atoms.s.streamout_begin);
- si_init_external_atom(sctx, &sctx->b.streamout.enable_atom, &sctx->atoms.s.streamout_enable);
+ si_init_external_atom(sctx, &sctx->streamout.begin_atom, &sctx->atoms.s.streamout_begin);
+ si_init_external_atom(sctx, &sctx->streamout.enable_atom, &sctx->atoms.s.streamout_enable);
si_init_external_atom(sctx, &sctx->scissors.atom, &sctx->atoms.s.scissors);
si_init_external_atom(sctx, &sctx->viewports.atom, &sctx->atoms.s.viewports);
si_init_atom(sctx, &sctx->framebuffer.atom, &sctx->atoms.s.framebuffer, si_emit_framebuffer_state);
si_init_atom(sctx, &sctx->msaa_sample_locs.atom, &sctx->atoms.s.msaa_sample_locs, si_emit_msaa_sample_locs);
si_init_atom(sctx, &sctx->db_render_state, &sctx->atoms.s.db_render_state, si_emit_db_render_state);
si_init_atom(sctx, &sctx->dpbb_state, &sctx->atoms.s.dpbb_state, si_emit_dpbb_state);
si_init_atom(sctx, &sctx->msaa_config, &sctx->atoms.s.msaa_config, si_emit_msaa_config);
si_init_atom(sctx, &sctx->sample_mask.atom, &sctx->atoms.s.sample_mask, si_emit_sample_mask);
si_init_atom(sctx, &sctx->cb_render_state, &sctx->atoms.s.cb_render_state, si_emit_cb_render_state);
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index 03e2a17..9d29878 100644
--- a/src/gallium/drivers/radeonsi/si_state.h
+++ b/src/gallium/drivers/radeonsi/si_state.h
@@ -416,20 +416,31 @@ void si_emit_cache_flush(struct si_context *sctx);
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *dinfo);
void si_draw_rectangle(struct blitter_context *blitter,
void *vertex_elements_cso,
blitter_get_vs_func get_vs,
int x1, int y1, int x2, int y2,
float depth, unsigned num_instances,
enum blitter_attrib_type type,
const union blitter_attrib *attrib);
void si_trace_emit(struct si_context *sctx);
+/* si_state_streamout.c */
+void si_streamout_buffers_dirty(struct si_context *sctx);
+void si_common_set_streamout_targets(struct pipe_context *ctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offset);
+void si_emit_streamout_end(struct si_context *sctx);
+void si_update_prims_generated_query_state(struct si_context *sctx,
+ unsigned type, int diff);
+void si_init_streamout_functions(struct si_context *sctx);
+
static inline unsigned
si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
{
if (stencil)
return rtex->surface.u.legacy.stencil_tiling_index[level];
else
return rtex->surface.u.legacy.tiling_index[level];
}
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 6eab4cb..9468fde 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -645,22 +645,22 @@ static void si_emit_draw_packets(struct si_context *sctx,
unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
uint32_t index_max_size = 0;
uint64_t index_va = 0;
if (info->count_from_stream_output) {
- struct r600_so_target *t =
- (struct r600_so_target*)info->count_from_stream_output;
+ struct si_streamout_target *t =
+ (struct si_streamout_target*)info->count_from_stream_output;
uint64_t va = t->buf_filled_size->gpu_address +
t->buf_filled_size_offset;
radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
t->stride_in_dw);
radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
COPY_DATA_DST_SEL(COPY_DATA_REG) |
COPY_DATA_WR_CONFIRM);
@@ -1479,21 +1479,21 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
if (unlikely(sctx->current_saved_cs)) {
si_trace_emit(sctx);
si_log_draw_state(sctx, sctx->b.log);
}
/* Workaround for a VGT hang when streamout is enabled.
* It must be done after drawing. */
if ((sctx->b.family == CHIP_HAWAII ||
sctx->b.family == CHIP_TONGA ||
sctx->b.family == CHIP_FIJI) &&
- r600_get_strmout_en(&sctx->b)) {
+ si_get_strmout_en(sctx)) {
sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
}
if (unlikely(sctx->decompression_enabled)) {
sctx->b.num_decompress_calls++;
} else {
sctx->b.num_draw_calls++;
if (sctx->framebuffer.state.nr_cbufs > 1)
sctx->b.num_mrt_draw_calls++;
if (info->primitive_restart)
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index dbaa2dc..9340328 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -2245,23 +2245,23 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
return sel;
}
static void si_update_streamout_state(struct si_context *sctx)
{
struct si_shader_selector *shader_with_so = si_get_vs(sctx)->cso;
if (!shader_with_so)
return;
- sctx->b.streamout.enabled_stream_buffers_mask =
+ sctx->streamout.enabled_stream_buffers_mask =
shader_with_so->enabled_streamout_buffer_mask;
- sctx->b.streamout.stride_in_dw = shader_with_so->so.stride;
+ sctx->streamout.stride_in_dw = shader_with_so->so.stride;
}
static void si_update_clip_regs(struct si_context *sctx,
struct si_shader_selector *old_hw_vs,
struct si_shader *old_hw_vs_variant,
struct si_shader_selector *next_hw_vs,
struct si_shader *next_hw_vs_variant)
{
if (next_hw_vs &&
(!old_hw_vs ||
diff --git a/src/gallium/drivers/radeon/r600_streamout.c b/src/gallium/drivers/radeonsi/si_state_streamout.c
similarity index 58%
rename from src/gallium/drivers/radeon/r600_streamout.c
rename to src/gallium/drivers/radeonsi/si_state_streamout.c
index 5c14b1b..42a83d4 100644
--- a/src/gallium/drivers/radeon/r600_streamout.c
+++ b/src/gallium/drivers/radeonsi/si_state_streamout.c
@@ -17,294 +17,296 @@
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors: Marek Olšák <maraeo at gmail.com>
*
*/
-#include "r600_pipe_common.h"
-#include "r600_cs.h"
+#include "si_pipe.h"
+#include "si_state.h"
+#include "radeon/r600_cs.h"
#include "util/u_memory.h"
-static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable);
+static void si_set_streamout_enable(struct si_context *sctx, bool enable);
static struct pipe_stream_output_target *
-r600_create_so_target(struct pipe_context *ctx,
- struct pipe_resource *buffer,
- unsigned buffer_offset,
- unsigned buffer_size)
+si_create_so_target(struct pipe_context *ctx,
+ struct pipe_resource *buffer,
+ unsigned buffer_offset,
+ unsigned buffer_size)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct r600_so_target *t;
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_streamout_target *t;
struct r600_resource *rbuffer = (struct r600_resource*)buffer;
- t = CALLOC_STRUCT(r600_so_target);
+ t = CALLOC_STRUCT(si_streamout_target);
if (!t) {
return NULL;
}
- u_suballocator_alloc(rctx->allocator_zeroed_memory, 4, 4,
+ u_suballocator_alloc(sctx->b.allocator_zeroed_memory, 4, 4,
&t->buf_filled_size_offset,
(struct pipe_resource**)&t->buf_filled_size);
if (!t->buf_filled_size) {
FREE(t);
return NULL;
}
t->b.reference.count = 1;
t->b.context = ctx;
pipe_resource_reference(&t->b.buffer, buffer);
t->b.buffer_offset = buffer_offset;
t->b.buffer_size = buffer_size;
util_range_add(&rbuffer->valid_buffer_range, buffer_offset,
buffer_offset + buffer_size);
return &t->b;
}
-static void r600_so_target_destroy(struct pipe_context *ctx,
- struct pipe_stream_output_target *target)
+static void si_so_target_destroy(struct pipe_context *ctx,
+ struct pipe_stream_output_target *target)
{
- struct r600_so_target *t = (struct r600_so_target*)target;
+ struct si_streamout_target *t = (struct si_streamout_target*)target;
pipe_resource_reference(&t->b.buffer, NULL);
r600_resource_reference(&t->buf_filled_size, NULL);
FREE(t);
}
-void si_streamout_buffers_dirty(struct r600_common_context *rctx)
+void si_streamout_buffers_dirty(struct si_context *sctx)
{
- if (!rctx->streamout.enabled_mask)
+ if (!sctx->streamout.enabled_mask)
return;
- rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, true);
- r600_set_streamout_enable(rctx, true);
+ si_mark_atom_dirty(sctx, &sctx->streamout.begin_atom);
+ si_set_streamout_enable(sctx, true);
}
void si_common_set_streamout_targets(struct pipe_context *ctx,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offsets)
{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct si_context *sctx = (struct si_context *)ctx;
unsigned i;
unsigned enabled_mask = 0, append_bitmask = 0;
/* Stop streamout. */
- if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) {
- si_emit_streamout_end(rctx);
+ if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
+ si_emit_streamout_end(sctx);
}
/* Set the new targets. */
for (i = 0; i < num_targets; i++) {
- pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]);
+ pipe_so_target_reference((struct pipe_stream_output_target**)&sctx->streamout.targets[i], targets[i]);
if (!targets[i])
continue;
r600_context_add_resource_size(ctx, targets[i]->buffer);
enabled_mask |= 1 << i;
if (offsets[i] == ((unsigned)-1))
append_bitmask |= 1 << i;
}
- for (; i < rctx->streamout.num_targets; i++) {
- pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL);
+ for (; i < sctx->streamout.num_targets; i++) {
+ pipe_so_target_reference((struct pipe_stream_output_target**)&sctx->streamout.targets[i], NULL);
}
- rctx->streamout.enabled_mask = enabled_mask;
+ sctx->streamout.enabled_mask = enabled_mask;
- rctx->streamout.num_targets = num_targets;
- rctx->streamout.append_bitmask = append_bitmask;
+ sctx->streamout.num_targets = num_targets;
+ sctx->streamout.append_bitmask = append_bitmask;
if (num_targets) {
- si_streamout_buffers_dirty(rctx);
+ si_streamout_buffers_dirty(sctx);
} else {
- rctx->set_atom_dirty(rctx, &rctx->streamout.begin_atom, false);
- r600_set_streamout_enable(rctx, false);
+ si_set_atom_dirty(sctx, &sctx->streamout.begin_atom, false);
+ si_set_streamout_enable(sctx, false);
}
}
-static void r600_flush_vgt_streamout(struct r600_common_context *rctx)
+static void si_flush_vgt_streamout(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned reg_strmout_cntl;
/* The register is at different places on different ASICs. */
- if (rctx->chip_class >= CIK) {
+ if (sctx->b.chip_class >= CIK) {
reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
radeon_set_uconfig_reg(cs, reg_strmout_cntl, 0);
} else {
reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
radeon_set_config_reg(cs, reg_strmout_cntl, 0);
}
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));
radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
radeon_emit(cs, reg_strmout_cntl >> 2); /* register */
radeon_emit(cs, 0);
radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */
radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */
radeon_emit(cs, 4); /* poll interval */
}
-static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
+static void si_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom)
{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
- struct r600_so_target **t = rctx->streamout.targets;
- uint16_t *stride_in_dw = rctx->streamout.stride_in_dw;
+ struct si_context *sctx = (struct si_context*)rctx;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct si_streamout_target **t = sctx->streamout.targets;
+ uint16_t *stride_in_dw = sctx->streamout.stride_in_dw;
unsigned i;
- r600_flush_vgt_streamout(rctx);
+ si_flush_vgt_streamout(sctx);
- for (i = 0; i < rctx->streamout.num_targets; i++) {
+ for (i = 0; i < sctx->streamout.num_targets; i++) {
if (!t[i])
continue;
t[i]->stride_in_dw = stride_in_dw[i];
/* SI binds streamout buffers as shader resources.
* VGT only counts primitives and tells the shader
* through SGPRs what to do. */
radeon_set_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 2);
radeon_emit(cs, (t[i]->b.buffer_offset +
t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */
- if (rctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
+ if (sctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
uint64_t va = t[i]->buf_filled_size->gpu_address +
t[i]->buf_filled_size_offset;
/* Append. */
radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, va); /* src address lo */
radeon_emit(cs, va >> 32); /* src address hi */
- r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
+ r600_emit_reloc(&sctx->b, &sctx->b.gfx, t[i]->buf_filled_size,
RADEON_USAGE_READ, RADEON_PRIO_SO_FILLED_SIZE);
} else {
/* Start from the beginning. */
radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
radeon_emit(cs, 0); /* unused */
}
}
- rctx->streamout.begin_emitted = true;
+ sctx->streamout.begin_emitted = true;
}
-void si_emit_streamout_end(struct r600_common_context *rctx)
+void si_emit_streamout_end(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = rctx->gfx.cs;
- struct r600_so_target **t = rctx->streamout.targets;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct si_streamout_target **t = sctx->streamout.targets;
unsigned i;
uint64_t va;
- r600_flush_vgt_streamout(rctx);
+ si_flush_vgt_streamout(sctx);
- for (i = 0; i < rctx->streamout.num_targets; i++) {
+ for (i = 0; i < sctx->streamout.num_targets; i++) {
if (!t[i])
continue;
va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;
radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) |
STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
radeon_emit(cs, va); /* dst address lo */
radeon_emit(cs, va >> 32); /* dst address hi */
radeon_emit(cs, 0); /* unused */
radeon_emit(cs, 0); /* unused */
- r600_emit_reloc(rctx, &rctx->gfx, t[i]->buf_filled_size,
+ r600_emit_reloc(&sctx->b, &sctx->b.gfx, t[i]->buf_filled_size,
RADEON_USAGE_WRITE, RADEON_PRIO_SO_FILLED_SIZE);
/* Zero the buffer size. The counters (primitives generated,
* primitives emitted) may be enabled even if there is not
* buffer bound. This ensures that the primitives-emitted query
* won't increment. */
radeon_set_context_reg(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 0);
t[i]->buf_filled_size_valid = true;
}
- rctx->streamout.begin_emitted = false;
- rctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH;
+ sctx->streamout.begin_emitted = false;
+ sctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH;
}
/* STREAMOUT CONFIG DERIVED STATE
*
* Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
* The buffer mask is an independent state, so no writes occur if there
* are no buffers bound.
*/
-static void r600_emit_streamout_enable(struct r600_common_context *rctx,
- struct r600_atom *atom)
+static void si_emit_streamout_enable(struct r600_common_context *rctx,
+ struct r600_atom *atom)
{
- radeon_set_context_reg_seq(rctx->gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
- radeon_emit(rctx->gfx.cs,
- S_028B94_STREAMOUT_0_EN(r600_get_strmout_en(rctx)) |
+ struct si_context *sctx = (struct si_context*)rctx;
+
+ radeon_set_context_reg_seq(sctx->b.gfx.cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
+ radeon_emit(sctx->b.gfx.cs,
+ S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
S_028B94_RAST_STREAM(0) |
- S_028B94_STREAMOUT_1_EN(r600_get_strmout_en(rctx)) |
- S_028B94_STREAMOUT_2_EN(r600_get_strmout_en(rctx)) |
- S_028B94_STREAMOUT_3_EN(r600_get_strmout_en(rctx)));
- radeon_emit(rctx->gfx.cs,
- rctx->streamout.hw_enabled_mask &
- rctx->streamout.enabled_stream_buffers_mask);
+ S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
+ S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
+ S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
+ radeon_emit(sctx->b.gfx.cs,
+ sctx->streamout.hw_enabled_mask &
+ sctx->streamout.enabled_stream_buffers_mask);
}
-static void r600_set_streamout_enable(struct r600_common_context *rctx, bool enable)
+static void si_set_streamout_enable(struct si_context *sctx, bool enable)
{
- bool old_strmout_en = r600_get_strmout_en(rctx);
- unsigned old_hw_enabled_mask = rctx->streamout.hw_enabled_mask;
+ bool old_strmout_en = si_get_strmout_en(sctx);
+ unsigned old_hw_enabled_mask = sctx->streamout.hw_enabled_mask;
- rctx->streamout.streamout_enabled = enable;
+ sctx->streamout.streamout_enabled = enable;
- rctx->streamout.hw_enabled_mask = rctx->streamout.enabled_mask |
- (rctx->streamout.enabled_mask << 4) |
- (rctx->streamout.enabled_mask << 8) |
- (rctx->streamout.enabled_mask << 12);
+ sctx->streamout.hw_enabled_mask = sctx->streamout.enabled_mask |
+ (sctx->streamout.enabled_mask << 4) |
+ (sctx->streamout.enabled_mask << 8) |
+ (sctx->streamout.enabled_mask << 12);
- if ((old_strmout_en != r600_get_strmout_en(rctx)) ||
- (old_hw_enabled_mask != rctx->streamout.hw_enabled_mask)) {
- rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
- }
+ if ((old_strmout_en != si_get_strmout_en(sctx)) ||
+ (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
+ si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
}
-void si_update_prims_generated_query_state(struct r600_common_context *rctx,
+void si_update_prims_generated_query_state(struct si_context *sctx,
unsigned type, int diff)
{
if (type == PIPE_QUERY_PRIMITIVES_GENERATED) {
- bool old_strmout_en = r600_get_strmout_en(rctx);
+ bool old_strmout_en = si_get_strmout_en(sctx);
- rctx->streamout.num_prims_gen_queries += diff;
- assert(rctx->streamout.num_prims_gen_queries >= 0);
+ sctx->streamout.num_prims_gen_queries += diff;
+ assert(sctx->streamout.num_prims_gen_queries >= 0);
- rctx->streamout.prims_gen_query_enabled =
- rctx->streamout.num_prims_gen_queries != 0;
+ sctx->streamout.prims_gen_query_enabled =
+ sctx->streamout.num_prims_gen_queries != 0;
- if (old_strmout_en != r600_get_strmout_en(rctx)) {
- rctx->set_atom_dirty(rctx, &rctx->streamout.enable_atom, true);
- }
+ if (old_strmout_en != si_get_strmout_en(sctx))
+ si_mark_atom_dirty(sctx, &sctx->streamout.enable_atom);
}
}
-void si_streamout_init(struct r600_common_context *rctx)
+void si_init_streamout_functions(struct si_context *sctx)
{
- rctx->b.create_stream_output_target = r600_create_so_target;
- rctx->b.stream_output_target_destroy = r600_so_target_destroy;
- rctx->streamout.begin_atom.emit = r600_emit_streamout_begin;
- rctx->streamout.enable_atom.emit = r600_emit_streamout_enable;
+ sctx->b.b.create_stream_output_target = si_create_so_target;
+ sctx->b.b.stream_output_target_destroy = si_so_target_destroy;
+ sctx->streamout.begin_atom.emit = si_emit_streamout_begin;
+ sctx->streamout.enable_atom.emit = si_emit_streamout_enable;
}
--
2.7.4