[Mesa-dev] [PATCH 5/8] radeonsi: wrap si_transfer::staging in a union
Marek Olšák
maraeo at gmail.com
Fri Jan 18 16:43:56 UTC 2019
From: Marek Olšák <marek.olsak at amd.com>
The union will be used later.
---
src/gallium/drivers/radeonsi/si_buffer.c | 8 +++----
src/gallium/drivers/radeonsi/si_pipe.h | 4 +++-
src/gallium/drivers/radeonsi/si_texture.c | 26 +++++++++++------------
3 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/src/gallium/drivers/radeonsi/si_buffer.c b/src/gallium/drivers/radeonsi/si_buffer.c
index c7260e06ccf..4766cf4bdfa 100644
--- a/src/gallium/drivers/radeonsi/si_buffer.c
+++ b/src/gallium/drivers/radeonsi/si_buffer.c
@@ -350,21 +350,21 @@ static void *si_buffer_get_transfer(struct pipe_context *ctx,
transfer->b.b.resource = NULL;
pipe_resource_reference(&transfer->b.b.resource, resource);
transfer->b.b.level = 0;
transfer->b.b.usage = usage;
transfer->b.b.box = *box;
transfer->b.b.stride = 0;
transfer->b.b.layer_stride = 0;
transfer->b.staging = NULL;
transfer->offset = offset;
- transfer->staging = staging;
+ transfer->u.staging = staging;
*ptransfer = &transfer->b.b;
return data;
}
static void *si_buffer_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned level,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **ptransfer)
@@ -513,24 +513,24 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx,
ptransfer, data, NULL, 0);
}
static void si_buffer_do_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
const struct pipe_box *box)
{
struct si_transfer *stransfer = (struct si_transfer*)transfer;
struct r600_resource *rbuffer = r600_resource(transfer->resource);
- if (stransfer->staging) {
+ if (stransfer->u.staging) {
/* Copy the staging buffer into the original one. */
si_copy_buffer((struct si_context*)ctx, transfer->resource,
- &stransfer->staging->b.b, box->x,
+ &stransfer->u.staging->b.b, box->x,
stransfer->offset + box->x % SI_MAP_BUFFER_ALIGNMENT,
box->width);
}
util_range_add(&rbuffer->valid_buffer_range, box->x,
box->x + box->width);
}
static void si_buffer_flush_region(struct pipe_context *ctx,
struct pipe_transfer *transfer,
@@ -550,21 +550,21 @@ static void si_buffer_flush_region(struct pipe_context *ctx,
static void si_buffer_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer *transfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct si_transfer *stransfer = (struct si_transfer*)transfer;
if (transfer->usage & PIPE_TRANSFER_WRITE &&
!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
si_buffer_do_flush_region(ctx, transfer, &transfer->box);
- r600_resource_reference(&stransfer->staging, NULL);
+ r600_resource_reference(&stransfer->u.staging, NULL);
assert(stransfer->b.staging == NULL); /* for threaded context only */
pipe_resource_reference(&transfer->resource, NULL);
/* Don't use pool_transfers_unsync. We are always in the driver
* thread. */
slab_free(&sctx->pool_transfers, transfer);
}
static void si_buffer_subdata(struct pipe_context *ctx,
struct pipe_resource *buffer,
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index d874f215a21..5bd3d9641d2 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -245,21 +245,23 @@ struct r600_resource {
/* Whether this resource is referenced by bindless handles. */
bool texture_handle_allocated;
bool image_handle_allocated;
/* Whether the resource has been exported via resource_get_handle. */
unsigned external_usage; /* PIPE_HANDLE_USAGE_* */
};
struct si_transfer {
struct threaded_transfer b;
- struct r600_resource *staging;
+ union {
+ struct r600_resource *staging;
+ } u;
unsigned offset;
};
struct si_texture {
struct r600_resource buffer;
struct radeon_surf surface;
uint64_t size;
struct si_texture *flushed_depth_texture;
diff --git a/src/gallium/drivers/radeonsi/si_texture.c b/src/gallium/drivers/radeonsi/si_texture.c
index 585f58c1e38..8f81c777aba 100644
--- a/src/gallium/drivers/radeonsi/si_texture.c
+++ b/src/gallium/drivers/radeonsi/si_texture.c
@@ -135,40 +135,40 @@ static void si_copy_region_with_blit(struct pipe_context *pipe,
if (blit.mask) {
pipe->blit(pipe, &blit);
}
}
/* Copy from a full GPU texture to a transfer's staging one. */
static void si_copy_to_staging_texture(struct pipe_context *ctx, struct si_transfer *stransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)stransfer;
- struct pipe_resource *dst = &stransfer->staging->b.b;
+ struct pipe_resource *dst = &stransfer->u.staging->b.b;
struct pipe_resource *src = transfer->resource;
if (src->nr_samples > 1) {
si_copy_region_with_blit(ctx, dst, 0, 0, 0, 0,
src, transfer->level, &transfer->box);
return;
}
sctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level,
&transfer->box);
}
/* Copy from a transfer's staging texture to a full GPU one. */
static void si_copy_from_staging_texture(struct pipe_context *ctx, struct si_transfer *stransfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct pipe_transfer *transfer = (struct pipe_transfer*)stransfer;
struct pipe_resource *dst = transfer->resource;
- struct pipe_resource *src = &stransfer->staging->b.b;
+ struct pipe_resource *src = &stransfer->u.staging->b.b;
struct pipe_box sbox;
u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox);
if (dst->nr_samples > 1) {
si_copy_region_with_blit(ctx, dst, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
src, 0, &sbox);
return;
}
@@ -1742,50 +1742,50 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
level, level,
box->z, box->z + box->depth - 1,
0, 0);
offset = si_texture_get_offset(sctx->screen, staging_depth,
level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
}
- trans->staging = &staging_depth->buffer;
- buf = trans->staging;
+ trans->u.staging = &staging_depth->buffer;
+ buf = trans->u.staging;
} else if (use_staging_texture) {
struct pipe_resource resource;
struct si_texture *staging;
si_init_temp_resource_from_box(&resource, texture, box, level,
SI_RESOURCE_FLAG_TRANSFER);
resource.usage = (usage & PIPE_TRANSFER_READ) ?
PIPE_USAGE_STAGING : PIPE_USAGE_STREAM;
/* Create the temporary texture. */
staging = (struct si_texture*)ctx->screen->resource_create(ctx->screen, &resource);
if (!staging) {
PRINT_ERR("failed to create temporary texture to hold untiled copy\n");
goto fail_trans;
}
- trans->staging = &staging->buffer;
+ trans->u.staging = &staging->buffer;
/* Just get the strides. */
si_texture_get_offset(sctx->screen, staging, 0, NULL,
&trans->b.b.stride,
&trans->b.b.layer_stride);
if (usage & PIPE_TRANSFER_READ)
si_copy_to_staging_texture(ctx, trans);
else
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
- buf = trans->staging;
+ buf = trans->u.staging;
} else {
/* the resource is mapped directly */
offset = si_texture_get_offset(sctx->screen, tex, level, box,
&trans->b.b.stride,
&trans->b.b.layer_stride);
buf = &tex->buffer;
}
/* Always unmap texture CPU mappings on 32-bit architectures, so that
* we don't run out of the CPU address space.
@@ -1793,58 +1793,58 @@ static void *si_texture_transfer_map(struct pipe_context *ctx,
if (sizeof(void*) == 4)
usage |= RADEON_TRANSFER_TEMPORARY;
if (!(map = si_buffer_map_sync_with_rings(sctx, buf, usage)))
goto fail_trans;
*ptransfer = &trans->b.b;
return map + offset;
fail_trans:
- r600_resource_reference(&trans->staging, NULL);
+ r600_resource_reference(&trans->u.staging, NULL);
pipe_resource_reference(&trans->b.b.resource, NULL);
FREE(trans);
return NULL;
}
static void si_texture_transfer_unmap(struct pipe_context *ctx,
struct pipe_transfer* transfer)
{
struct si_context *sctx = (struct si_context*)ctx;
struct si_transfer *stransfer = (struct si_transfer*)transfer;
struct pipe_resource *texture = transfer->resource;
struct si_texture *tex = (struct si_texture*)texture;
/* Always unmap texture CPU mappings on 32-bit architectures, so that
* we don't run out of the CPU address space.
*/
if (sizeof(void*) == 4) {
struct r600_resource *buf =
- stransfer->staging ? stransfer->staging : &tex->buffer;
+ stransfer->u.staging ? stransfer->u.staging : &tex->buffer;
sctx->ws->buffer_unmap(buf->buf);
}
- if ((transfer->usage & PIPE_TRANSFER_WRITE) && stransfer->staging) {
+ if ((transfer->usage & PIPE_TRANSFER_WRITE) && stransfer->u.staging) {
if (tex->is_depth && tex->buffer.b.b.nr_samples <= 1) {
ctx->resource_copy_region(ctx, texture, transfer->level,
transfer->box.x, transfer->box.y, transfer->box.z,
- &stransfer->staging->b.b, transfer->level,
+ &stransfer->u.staging->b.b, transfer->level,
&transfer->box);
} else {
si_copy_from_staging_texture(ctx, stransfer);
}
}
- if (stransfer->staging) {
- sctx->num_alloc_tex_transfer_bytes += stransfer->staging->buf->size;
- r600_resource_reference(&stransfer->staging, NULL);
+ if (stransfer->u.staging) {
+ sctx->num_alloc_tex_transfer_bytes += stransfer->u.staging->buf->size;
+ r600_resource_reference(&stransfer->u.staging, NULL);
}
/* Heuristic for {upload, draw, upload, draw, ..}:
*
* Flush the gfx IB if we've allocated too much texture storage.
*
* The idea is that we don't want to build IBs that use too much
* memory and put pressure on the kernel memory manager and we also
* want to make temporary and invalidated buffers go idle ASAP to
* decrease the total memory usage or make them reusable. The memory
--
2.17.1
More information about the mesa-dev
mailing list