[Mesa-dev] [PATCH 5/5] radeonsi: use local ws variable in si_need_dma_space
Marek Olšák
maraeo at gmail.com
Thu Jan 31 18:56:29 UTC 2019
From: Marek Olšák <marek.olsak at amd.com>
---
src/gallium/drivers/radeonsi/si_dma_cs.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/src/gallium/drivers/radeonsi/si_dma_cs.c b/src/gallium/drivers/radeonsi/si_dma_cs.c
index 33177a9e4ad..2aafc1f09a0 100644
--- a/src/gallium/drivers/radeonsi/si_dma_cs.c
+++ b/src/gallium/drivers/radeonsi/si_dma_cs.c
@@ -119,71 +119,72 @@ void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
                 radeon_emit(cs, clear_value);
                 radeon_emit(cs, sctx->chip_class >= GFX9 ? csize - 1 : csize);
                 offset += csize;
                 size -= csize;
         }
 }
 
 void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
                        struct si_resource *dst, struct si_resource *src)
 {
+        struct radeon_winsys *ws = ctx->ws;
         uint64_t vram = ctx->dma_cs->used_vram;
         uint64_t gtt = ctx->dma_cs->used_gart;
 
         if (dst) {
                 vram += dst->vram_usage;
                 gtt += dst->gart_usage;
         }
         if (src) {
                 vram += src->vram_usage;
                 gtt += src->gart_usage;
         }
 
         /* Flush the GFX IB if DMA depends on it. */
         if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
             ((dst &&
-              ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
-                                               RADEON_USAGE_READWRITE)) ||
+              ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
+                                          RADEON_USAGE_READWRITE)) ||
              (src &&
-              ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
-                                               RADEON_USAGE_WRITE))))
+              ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
+                                          RADEON_USAGE_WRITE))))
                 si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
 
         /* Flush if there's not enough space, or if the memory usage per IB
          * is too large.
          *
          * IBs using too little memory are limited by the IB submission overhead.
          * IBs using too much memory are limited by the kernel/TTM overhead.
         * Too long IBs create CPU-GPU pipeline bubbles and add latency.
          *
          * This heuristic makes sure that DMA requests are executed
          * very soon after the call is made and lowers memory usage.
          * It improves texture upload performance by keeping the DMA
          * engine busy while uploads are being submitted.
          */
         num_dw++; /* for emit_wait_idle below */
-        if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
+        if (!ws->cs_check_space(ctx->dma_cs, num_dw) ||
             ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
             !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
                 si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
                 assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
         }
 
         /* Wait for idle if either buffer has been used in the IB before to
          * prevent read-after-write hazards.
          */
         if ((dst &&
-             ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
-                                              RADEON_USAGE_READWRITE)) ||
+             ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
+                                         RADEON_USAGE_READWRITE)) ||
             (src &&
-             ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
-                                              RADEON_USAGE_WRITE)))
+             ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
+                                         RADEON_USAGE_WRITE)))
                 si_dma_emit_wait_idle(ctx);
 
         if (dst) {
                 radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
                                           RADEON_USAGE_WRITE, 0);
         }
         if (src) {
                 radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
                                           RADEON_USAGE_READ, 0);
         }
--
2.17.1
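
For anyone skimming the archive, here is a minimal standalone sketch of the pattern this patch applies: read a member pointer that is dereferenced several times (ctx->ws in si_need_dma_space) into a local once, so the call sites shorten from ctx->ws->fn(...) to ws->fn(...). All names below (toy_winsys, toy_context, toy_need_space, toy_check_space) are hypothetical stand-ins for illustration only, not Mesa APIs; only si_need_dma_space and ctx->ws come from the patch itself.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for illustration only; not Mesa code. */
struct toy_winsys {
        bool (*check_space)(struct toy_winsys *ws, unsigned num_dw);
};

struct toy_context {
        struct toy_winsys *ws;
};

static bool toy_check_space(struct toy_winsys *ws, unsigned num_dw)
{
        (void)ws;
        return num_dw < 1024; /* pretend the command stream still has room */
}

static void toy_need_space(struct toy_context *ctx, unsigned num_dw)
{
        /* Read the member pointer once, as si_need_dma_space now does with
         * ctx->ws, then use the shorter local for every call below.
         */
        struct toy_winsys *ws = ctx->ws;

        if (!ws->check_space(ws, num_dw))
                printf("would flush here\n");
        else
                printf("enough space for %u dwords\n", num_dw);
}

int main(void)
{
        struct toy_winsys winsys = { .check_space = toy_check_space };
        struct toy_context ctx = { .ws = &winsys };

        toy_need_space(&ctx, 16);
        return 0;
}

This is purely a readability change; since ctx->ws does not change between the calls, the generated code is usually identical either way.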