[Mesa-dev] [PATCH 6/9] radeonsi: rename rfence -> sfence
Marek Olšák
maraeo at gmail.com
Sat Jan 19 00:40:19 UTC 2019
From: Marek Olšák <marek.olsak@amd.com>
---
src/gallium/drivers/radeonsi/si_fence.c | 98 ++++++++++++-------------
1 file changed, 49 insertions(+), 49 deletions(-)
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index bb53ccba947..78da742b5da 100644
--- a/src/gallium/drivers/radeonsi/si_fence.c
+++ b/src/gallium/drivers/radeonsi/si_fence.c
@@ -279,81 +279,81 @@ static void si_fine_fence_set(struct si_context *ctx,
assert(false);
}
}
static boolean si_fence_finish(struct pipe_screen *screen,
struct pipe_context *ctx,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
struct radeon_winsys *rws = ((struct si_screen*)screen)->ws;
- struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+ struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
struct si_context *sctx;
int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
ctx = threaded_context_unwrap_sync(ctx);
sctx = (struct si_context*)(ctx ? ctx : NULL);
- if (!util_queue_fence_is_signalled(&rfence->ready)) {
- if (rfence->tc_token) {
+ if (!util_queue_fence_is_signalled(&sfence->ready)) {
+ if (sfence->tc_token) {
/* Ensure that si_flush_from_st will be called for
* this fence, but only if we're in the API thread
* where the context is current.
*
* Note that the batch containing the flush may already
* be in flight in the driver thread, so the fence
* may not be ready yet when this call returns.
*/
- threaded_context_flush(ctx, rfence->tc_token,
+ threaded_context_flush(ctx, sfence->tc_token,
timeout == 0);
}
if (!timeout)
return false;
if (timeout == PIPE_TIMEOUT_INFINITE) {
- util_queue_fence_wait(&rfence->ready);
+ util_queue_fence_wait(&sfence->ready);
} else {
- if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
+ if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
return false;
}
if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
int64_t time = os_time_get_nano();
timeout = abs_timeout > time ? abs_timeout - time : 0;
}
}
- if (rfence->sdma) {
- if (!rws->fence_wait(rws, rfence->sdma, timeout))
+ if (sfence->sdma) {
+ if (!rws->fence_wait(rws, sfence->sdma, timeout))
return false;
/* Recompute the timeout after waiting. */
if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
int64_t time = os_time_get_nano();
timeout = abs_timeout > time ? abs_timeout - time : 0;
}
}
- if (!rfence->gfx)
+ if (!sfence->gfx)
return true;
- if (rfence->fine.buf &&
- si_fine_fence_signaled(rws, &rfence->fine)) {
- rws->fence_reference(&rfence->gfx, NULL);
- si_resource_reference(&rfence->fine.buf, NULL);
+ if (sfence->fine.buf &&
+ si_fine_fence_signaled(rws, &sfence->fine)) {
+ rws->fence_reference(&sfence->gfx, NULL);
+ si_resource_reference(&sfence->fine.buf, NULL);
return true;
}
/* Flush the gfx IB if it hasn't been flushed yet. */
- if (sctx && rfence->gfx_unflushed.ctx == sctx &&
- rfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
+ if (sctx && sfence->gfx_unflushed.ctx == sctx &&
+ sfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
/* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
* spec says:
*
* "If the sync object being blocked upon will not be
* signaled in finite time (for example, by an associated
* fence command issued previously, but not yet flushed to
* the graphics pipeline), then ClientWaitSync may hang
* forever. To help prevent this behavior, if
* ClientWaitSync is called and all of the following are
* true:
@@ -366,111 +366,111 @@ static boolean si_fence_finish(struct pipe_screen *screen,
* then the GL will behave as if the equivalent of Flush
* were inserted immediately after the creation of sync."
*
* This means we need to flush for such fences even when we're
* not going to wait.
*/
si_flush_gfx_cs(sctx,
(timeout ? 0 : PIPE_FLUSH_ASYNC) |
RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
NULL);
- rfence->gfx_unflushed.ctx = NULL;
+ sfence->gfx_unflushed.ctx = NULL;
if (!timeout)
return false;
/* Recompute the timeout after all that. */
if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
int64_t time = os_time_get_nano();
timeout = abs_timeout > time ? abs_timeout - time : 0;
}
}
- if (rws->fence_wait(rws, rfence->gfx, timeout))
+ if (rws->fence_wait(rws, sfence->gfx, timeout))
return true;
/* Re-check in case the GPU is slow or hangs, but the commands before
* the fine-grained fence have completed. */
- if (rfence->fine.buf &&
- si_fine_fence_signaled(rws, &rfence->fine))
+ if (sfence->fine.buf &&
+ si_fine_fence_signaled(rws, &sfence->fine))
return true;
return false;
}
static void si_create_fence_fd(struct pipe_context *ctx,
struct pipe_fence_handle **pfence, int fd,
enum pipe_fd_type type)
{
struct si_screen *sscreen = (struct si_screen*)ctx->screen;
struct radeon_winsys *ws = sscreen->ws;
- struct si_multi_fence *rfence;
+ struct si_multi_fence *sfence;
*pfence = NULL;
- rfence = si_create_multi_fence();
- if (!rfence)
+ sfence = si_create_multi_fence();
+ if (!sfence)
return;
switch (type) {
case PIPE_FD_TYPE_NATIVE_SYNC:
if (!sscreen->info.has_fence_to_handle)
goto finish;
- rfence->gfx = ws->fence_import_sync_file(ws, fd);
+ sfence->gfx = ws->fence_import_sync_file(ws, fd);
break;
case PIPE_FD_TYPE_SYNCOBJ:
if (!sscreen->info.has_syncobj)
goto finish;
- rfence->gfx = ws->fence_import_syncobj(ws, fd);
+ sfence->gfx = ws->fence_import_syncobj(ws, fd);
break;
default:
unreachable("bad fence fd type when importing");
}
finish:
- if (!rfence->gfx) {
- FREE(rfence);
+ if (!sfence->gfx) {
+ FREE(sfence);
return;
}
- *pfence = (struct pipe_fence_handle*)rfence;
+ *pfence = (struct pipe_fence_handle*)sfence;
}
static int si_fence_get_fd(struct pipe_screen *screen,
struct pipe_fence_handle *fence)
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct radeon_winsys *ws = sscreen->ws;
- struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+ struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
int gfx_fd = -1, sdma_fd = -1;
if (!sscreen->info.has_fence_to_handle)
return -1;
- util_queue_fence_wait(&rfence->ready);
+ util_queue_fence_wait(&sfence->ready);
/* Deferred fences aren't supported. */
- assert(!rfence->gfx_unflushed.ctx);
- if (rfence->gfx_unflushed.ctx)
+ assert(!sfence->gfx_unflushed.ctx);
+ if (sfence->gfx_unflushed.ctx)
return -1;
- if (rfence->sdma) {
- sdma_fd = ws->fence_export_sync_file(ws, rfence->sdma);
+ if (sfence->sdma) {
+ sdma_fd = ws->fence_export_sync_file(ws, sfence->sdma);
if (sdma_fd == -1)
return -1;
}
- if (rfence->gfx) {
- gfx_fd = ws->fence_export_sync_file(ws, rfence->gfx);
+ if (sfence->gfx) {
+ gfx_fd = ws->fence_export_sync_file(ws, sfence->gfx);
if (gfx_fd == -1) {
if (sdma_fd != -1)
close(sdma_fd);
return -1;
}
}
/* If we don't have FDs at this point, it means we don't have fences
* either. */
if (sdma_fd == -1 && gfx_fd == -1)
@@ -577,67 +577,67 @@ finish:
if (sctx->dma_cs)
ws->cs_sync_flush(sctx->dma_cs);
ws->cs_sync_flush(sctx->gfx_cs);
}
}
static void si_fence_server_signal(struct pipe_context *ctx,
struct pipe_fence_handle *fence)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+ struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
/* We should have at least one syncobj to signal */
- assert(rfence->sdma || rfence->gfx);
+ assert(sfence->sdma || sfence->gfx);
- if (rfence->sdma)
- si_add_syncobj_signal(sctx, rfence->sdma);
- if (rfence->gfx)
- si_add_syncobj_signal(sctx, rfence->gfx);
+ if (sfence->sdma)
+ si_add_syncobj_signal(sctx, sfence->sdma);
+ if (sfence->gfx)
+ si_add_syncobj_signal(sctx, sfence->gfx);
/**
* The spec does not require a flush here. We insert a flush
* because syncobj based signals are not directly placed into
* the command stream. Instead the signal happens when the
* submission associated with the syncobj finishes execution.
*
* Therefore, we must make sure that we flush the pipe to avoid
* new work being emitted and getting executed before the signal
* operation.
*/
si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
}
static void si_fence_server_sync(struct pipe_context *ctx,
struct pipe_fence_handle *fence)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+ struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
- util_queue_fence_wait(&rfence->ready);
+ util_queue_fence_wait(&sfence->ready);
/* Unflushed fences from the same context are no-ops. */
- if (rfence->gfx_unflushed.ctx &&
- rfence->gfx_unflushed.ctx == sctx)
+ if (sfence->gfx_unflushed.ctx &&
+ sfence->gfx_unflushed.ctx == sctx)
return;
/* All unflushed commands will not start execution before
* this fence dependency is signalled.
*
* Therefore we must flush before inserting the dependency
*/
si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
- if (rfence->sdma)
- si_add_fence_dependency(sctx, rfence->sdma);
- if (rfence->gfx)
- si_add_fence_dependency(sctx, rfence->gfx);
+ if (sfence->sdma)
+ si_add_fence_dependency(sctx, sfence->sdma);
+ if (sfence->gfx)
+ si_add_fence_dependency(sctx, sfence->gfx);
}
void si_init_fence_functions(struct si_context *ctx)
{
ctx->b.flush = si_flush_from_st;
ctx->b.create_fence_fd = si_create_fence_fd;
ctx->b.fence_server_sync = si_fence_server_sync;
ctx->b.fence_server_signal = si_fence_server_signal;
}
--
2.17.1