[Mesa-dev] [PATCH 15/17] gallium/radeon: remove r600_common_context::max_db
Marek Olšák
maraeo at gmail.com
Thu Jan 26 16:04:31 UTC 2017
From: Marek Olšák <marek.olsak at amd.com>
This cleanup is based on the Vulkan driver, which seems to do the same thing.
---
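For context: the hunks below drop the cached per-context upper bound and size
occlusion-query buffers from radeon_info::num_render_backends instead. A minimal
standalone sketch of the two computations (illustration only, not part of the
patch; the chip_class values and MAX2 below are local stand-ins so the snippet
compiles on its own):

  #include <stdio.h>

  #define MAX2(a, b) ((a) > (b) ? (a) : (b))

  /* Stand-in values; only the ordering the driver relies on is preserved. */
  enum chip_class { R600 = 1, EVERGREEN, CIK };

  /* Old: r600_common_context::max_db, derived once per context. */
  static unsigned old_max_db(enum chip_class cc, unsigned num_render_backends)
  {
          if (cc >= CIK)
                  return MAX2(8, num_render_backends);
          else if (cc >= EVERGREEN)
                  return 8;
          return 4;
  }

  /* New: use radeon_info::num_render_backends directly, e.g. when sizing an
   * occlusion query result buffer (16 bytes per RB for the begin/end ZPASS
   * pair, plus 16 for the fence + alignment). */
  static unsigned occlusion_result_size(unsigned num_render_backends)
  {
          return 16 * num_render_backends + 16;
  }

  int main(void)
  {
          unsigned num_rbs = 4; /* arbitrary example value */
          printf("old (CIK): %u bytes\n", 16 * old_max_db(CIK, num_rbs) + 16);
          printf("new:       %u bytes\n", occlusion_result_size(num_rbs));
          return 0;
  }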
src/gallium/drivers/radeon/r600_pipe_common.c | 7 -------
src/gallium/drivers/radeon/r600_pipe_common.h | 2 --
src/gallium/drivers/radeon/r600_query.c | 28 ++++++++++++++++-----------
3 files changed, 17 insertions(+), 20 deletions(-)
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index 5be21b4..aa39ef1 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.c
+++ b/src/gallium/drivers/radeon/r600_pipe_common.c
@@ -554,27 +554,20 @@ bool r600_common_context_init(struct r600_common_context *rctx,
struct r600_common_screen *rscreen,
unsigned context_flags)
{
slab_create_child(&rctx->pool_transfers, &rscreen->pool_transfers);
rctx->screen = rscreen;
rctx->ws = rscreen->ws;
rctx->family = rscreen->family;
rctx->chip_class = rscreen->chip_class;
- if (rscreen->chip_class >= CIK)
- rctx->max_db = MAX2(8, rscreen->info.num_render_backends);
- else if (rscreen->chip_class >= EVERGREEN)
- rctx->max_db = 8;
- else
- rctx->max_db = 4;
-
rctx->b.invalidate_resource = r600_invalidate_resource;
rctx->b.transfer_map = u_transfer_map_vtbl;
rctx->b.transfer_flush_region = u_transfer_flush_region_vtbl;
rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
rctx->b.texture_subdata = u_default_texture_subdata;
rctx->b.memory_barrier = r600_memory_barrier;
rctx->b.flush = r600_flush_from_st;
rctx->b.set_debug_callback = r600_set_debug_callback;
rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h
index bc88fab..fafe6c1 100644
--- a/src/gallium/drivers/radeon/r600_pipe_common.h
+++ b/src/gallium/drivers/radeon/r600_pipe_common.h
@@ -573,22 +573,20 @@ struct r600_common_context {
/* Additional context states. */
unsigned flags; /* flush flags */
/* Queries. */
/* Maintain the list of active queries for pausing between IBs. */
int num_occlusion_queries;
int num_perfect_occlusion_queries;
struct list_head active_queries;
unsigned num_cs_dw_queries_suspend;
- /* Additional hardware info. */
- unsigned max_db; /* for OQ */
/* Misc stats. */
unsigned num_draw_calls;
unsigned num_spill_draw_calls;
unsigned num_compute_calls;
unsigned num_spill_compute_calls;
unsigned num_dma_calls;
unsigned num_cp_dma_calls;
unsigned num_vs_flushes;
unsigned num_ps_flushes;
unsigned num_cs_flushes;
diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c
index 564f59e..abf1c7e 100644
--- a/src/gallium/drivers/radeon/r600_query.c
+++ b/src/gallium/drivers/radeon/r600_query.c
@@ -422,34 +422,35 @@ static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED);
if (!results)
return false;
memset(results, 0, buffer->b.b.width0);
if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
+ unsigned max_rbs = ctx->screen->info.num_render_backends;
unsigned enabled_rb_mask = ctx->screen->info.enabled_rb_mask;
unsigned num_results;
unsigned i, j;
/* Set top bits for unused backends. */
num_results = buffer->b.b.width0 / query->result_size;
for (j = 0; j < num_results; j++) {
- for (i = 0; i < ctx->max_db; i++) {
+ for (i = 0; i < max_rbs; i++) {
if (!(enabled_rb_mask & (1<<i))) {
results[(i * 4)+1] = 0x80000000;
results[(i * 4)+3] = 0x80000000;
}
}
- results += 4 * ctx->max_db;
+ results += 4 * max_rbs;
}
}
return true;
}
static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
struct r600_query *rquery,
bool wait,
enum pipe_query_value_type result_type,
@@ -505,21 +506,21 @@ static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
if (!query)
return NULL;
query->b.type = query_type;
query->b.ops = &query_hw_ops;
query->ops = &query_hw_default_hw_ops;
switch (query_type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
- query->result_size = 16 * rctx->max_db;
+ query->result_size = 16 * rctx->screen->info.num_render_backends;
query->result_size += 16; /* for the fence + alignment */
query->num_cs_dw_begin = 6;
query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
break;
case PIPE_QUERY_TIME_ELAPSED:
query->result_size = 24;
query->num_cs_dw_begin = 8;
query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
break;
case PIPE_QUERY_TIMESTAMP:
@@ -680,21 +681,21 @@ static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
va += 8;
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32) & 0xFFFF);
- fence_va = va + ctx->max_db * 16 - 8;
+ fence_va = va + ctx->screen->info.num_render_backends * 16 - 8;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_SO_STATISTICS:
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
va += query->result_size/2;
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32) & 0xFFFF);
@@ -916,31 +917,33 @@ bool r600_query_hw_end(struct r600_common_context *rctx,
if (!query->buffer.buf)
return false;
return true;
}
static void r600_get_hw_query_params(struct r600_common_context *rctx,
struct r600_query_hw *rquery, int index,
struct r600_hw_query_params *params)
{
+ unsigned max_rbs = rctx->screen->info.num_render_backends;
+
params->pair_stride = 0;
params->pair_count = 1;
switch (rquery->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
params->start_offset = 0;
params->end_offset = 8;
- params->fence_offset = rctx->max_db * 16;
+ params->fence_offset = max_rbs * 16;
params->pair_stride = 16;
- params->pair_count = rctx->max_db;
+ params->pair_count = max_rbs;
break;
case PIPE_QUERY_TIME_ELAPSED:
params->start_offset = 0;
params->end_offset = 8;
params->fence_offset = 16;
break;
case PIPE_QUERY_TIMESTAMP:
params->start_offset = 0;
params->end_offset = 0;
params->fence_offset = 8;
@@ -990,31 +993,33 @@ static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned
return end - start;
}
return 0;
}
static void r600_query_hw_add_result(struct r600_common_context *ctx,
struct r600_query_hw *query,
void *buffer,
union pipe_query_result *result)
{
+ unsigned max_rbs = ctx->screen->info.num_render_backends;
+
switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER: {
- for (unsigned i = 0; i < ctx->max_db; ++i) {
+ for (unsigned i = 0; i < max_rbs; ++i) {
unsigned results_base = i * 16;
result->u64 +=
r600_query_read_result(buffer + results_base, 0, 2, true);
}
break;
}
case PIPE_QUERY_OCCLUSION_PREDICATE: {
- for (unsigned i = 0; i < ctx->max_db; ++i) {
+ for (unsigned i = 0; i < max_rbs; ++i) {
unsigned results_base = i * 16;
result->b = result->b ||
r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
}
break;
}
case PIPE_QUERY_TIME_ELAPSED:
result->u64 += r600_query_read_result(buffer, 0, 2, false);
break;
case PIPE_QUERY_TIMESTAMP:
@@ -1608,20 +1613,21 @@ void r600_resume_queries(struct r600_common_context *ctx)
/* Fix radeon_info::enabled_rb_mask for R600, R700, EVERGREEN, NI. */
void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
{
struct r600_common_context *ctx =
(struct r600_common_context*)rscreen->aux_context;
struct radeon_winsys_cs *cs = ctx->gfx.cs;
struct r600_resource *buffer;
uint32_t *results;
unsigned i, mask = 0;
+ unsigned max_rbs = ctx->screen->info.num_render_backends;
assert(rscreen->chip_class <= CAYMAN);
/* if backend_map query is supported by the kernel */
if (rscreen->info.r600_gb_backend_map_valid) {
unsigned num_tile_pipes = rscreen->info.num_tile_pipes;
unsigned backend_map = rscreen->info.r600_gb_backend_map;
unsigned item_width, item_mask;
if (ctx->chip_class >= EVERGREEN) {
@@ -1641,42 +1647,42 @@ void r600_query_fix_enabled_rb_mask(struct r600_common_screen *rscreen)
rscreen->info.enabled_rb_mask = mask;
return;
}
}
/* otherwise backup path for older kernels */
/* create buffer for event data */
buffer = (struct r600_resource*)
pipe_buffer_create(ctx->b.screen, 0,
- PIPE_USAGE_STAGING, ctx->max_db*16);
+ PIPE_USAGE_STAGING, max_rbs * 16);
if (!buffer)
return;
/* initialize buffer with zeroes */
results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
if (results) {
- memset(results, 0, ctx->max_db * 4 * 4);
+ memset(results, 0, max_rbs * 4 * 4);
/* emit EVENT_WRITE for ZPASS_DONE */
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, buffer->gpu_address);
radeon_emit(cs, buffer->gpu_address >> 32);
r600_emit_reloc(ctx, &ctx->gfx, buffer,
RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
/* analyze results */
results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
if (results) {
- for(i = 0; i < ctx->max_db; i++) {
+ for(i = 0; i < max_rbs; i++) {
/* at least highest bit will be set if backend is used */
if (results[i*4 + 1])
mask |= (1<<i);
}
}
}
r600_resource_reference(&buffer, NULL);
if (mask)
--
2.7.4