Mesa (main): mesa/st: migrate most of state tracker buffer objects into mesa
GitLab Mirror
gitlab-mirror at kemper.freedesktop.org
Wed Dec 15 14:02:49 UTC 2021
Module: Mesa
Branch: main
Commit: 7288fcdc72492aa6ffc7dd08f0a5cbd1b969a380
URL: http://cgit.freedesktop.org/mesa/mesa/commit/?id=7288fcdc72492aa6ffc7dd08f0a5cbd1b969a380
Author: Dave Airlie <airlied at redhat.com>
Date: Thu Dec 9 13:00:08 2021 +1000
mesa/st: migrate most of state tracker buffer objects into mesa
This moves all of non-optional st functions into the main bufferobj.c
file.
Reviewed-by: Marek Olšák <marek.olsak at amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14133>
---
src/mesa/main/bufferobj.c | 553 +++++++++++++++++++++++++++
src/mesa/main/bufferobj.h | 49 +++
src/mesa/state_tracker/st_cb_bufferobjects.c | 553 +--------------------------
src/mesa/state_tracker/st_cb_bufferobjects.h | 49 ---
4 files changed, 603 insertions(+), 601 deletions(-)
diff --git a/src/mesa/main/bufferobj.c b/src/mesa/main/bufferobj.c
index 15d70b077fc..50846cc6b69 100644
--- a/src/mesa/main/bufferobj.c
+++ b/src/mesa/main/bufferobj.c
@@ -51,6 +51,11 @@
#include "util/set.h"
#include "state_tracker/st_cb_bufferobjects.h"
+#include "state_tracker/st_cb_memoryobjects.h"
+
+#include "state_tracker/st_debug.h"
+#include "state_tracker/st_atom.h"
+#include "frontend/api.h"
#include "util/u_inlines.h"
/* Debug flags */
@@ -66,6 +71,554 @@
#define BUFFER_WARNING_CALL_COUNT 4
+/**
+ * Replace data in a subrange of buffer object. If the data range
+ * specified by size + offset extends beyond the end of the buffer or
+ * if data is NULL, no copy is performed.
+ * Called via glBufferSubDataARB().
+ */
+void
+st_bufferobj_subdata(struct gl_context *ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const void * data, struct gl_buffer_object *obj)
+{
+ /* we may be called from VBO code, so double-check params here */
+ assert(offset >= 0);
+ assert(size >= 0);
+ assert(offset + size <= obj->Size);
+
+ if (!size)
+ return;
+
+ /*
+ * According to ARB_vertex_buffer_object specification, if data is null,
+ * then the contents of the buffer object's data store is undefined. We just
+ * ignore, and leave it unchanged.
+ */
+ if (!data)
+ return;
+
+ if (!obj->buffer) {
+ /* we probably ran out of memory during buffer allocation */
+ return;
+ }
+
+ /* Now that transfers are per-context, we don't have to figure out
+ * flushing here. Usually drivers won't need to flush in this case
+ * even if the buffer is currently referenced by hardware - they
+ * just queue the upload as dma rather than mapping the underlying
+ * buffer directly.
+ *
+ * If the buffer is mapped, suppress implicit buffer range invalidation
+ * by using PIPE_MAP_DIRECTLY.
+ */
+ struct pipe_context *pipe = ctx->pipe;
+
+ pipe->buffer_subdata(pipe, obj->buffer,
+ _mesa_bufferobj_mapped(obj, MAP_USER) ?
+ PIPE_MAP_DIRECTLY : 0,
+ offset, size, data);
+}
+
+
+/**
+ * Called via glGetBufferSubDataARB().
+ */
+void
+st_bufferobj_get_subdata(struct gl_context *ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ void * data, struct gl_buffer_object *obj)
+{
+ /* we may be called from VBO code, so double-check params here */
+ assert(offset >= 0);
+ assert(size >= 0);
+ assert(offset + size <= obj->Size);
+
+ if (!size)
+ return;
+
+ if (!obj->buffer) {
+ /* we probably ran out of memory during buffer allocation */
+ return;
+ }
+
+ pipe_buffer_read(ctx->pipe, obj->buffer,
+ offset, size, data);
+}
+
+
+/**
+ * Return bitmask of PIPE_BIND_x flags corresponding to a GL buffer target.
+ */
+static unsigned
+buffer_target_to_bind_flags(GLenum target)
+{
+ switch (target) {
+ case GL_PIXEL_PACK_BUFFER_ARB:
+ case GL_PIXEL_UNPACK_BUFFER_ARB:
+ return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
+ case GL_ARRAY_BUFFER_ARB:
+ return PIPE_BIND_VERTEX_BUFFER;
+ case GL_ELEMENT_ARRAY_BUFFER_ARB:
+ return PIPE_BIND_INDEX_BUFFER;
+ case GL_TEXTURE_BUFFER:
+ return PIPE_BIND_SAMPLER_VIEW;
+ case GL_TRANSFORM_FEEDBACK_BUFFER:
+ return PIPE_BIND_STREAM_OUTPUT;
+ case GL_UNIFORM_BUFFER:
+ return PIPE_BIND_CONSTANT_BUFFER;
+ case GL_DRAW_INDIRECT_BUFFER:
+ case GL_PARAMETER_BUFFER_ARB:
+ return PIPE_BIND_COMMAND_ARGS_BUFFER;
+ case GL_ATOMIC_COUNTER_BUFFER:
+ case GL_SHADER_STORAGE_BUFFER:
+ return PIPE_BIND_SHADER_BUFFER;
+ case GL_QUERY_BUFFER:
+ return PIPE_BIND_QUERY_BUFFER;
+ default:
+ return 0;
+ }
+}
+
+
+/**
+ * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
+ */
+static unsigned
+storage_flags_to_buffer_flags(GLbitfield storageFlags)
+{
+ unsigned flags = 0;
+ if (storageFlags & GL_MAP_PERSISTENT_BIT)
+ flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
+ if (storageFlags & GL_MAP_COHERENT_BIT)
+ flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
+ if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
+ flags |= PIPE_RESOURCE_FLAG_SPARSE;
+ return flags;
+}
+
+
+/**
+ * From a buffer object's target, immutability flag, storage flags and
+ * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
+ * STREAM, etc).
+ */
+static enum pipe_resource_usage
+buffer_usage(GLenum target, GLboolean immutable,
+ GLbitfield storageFlags, GLenum usage)
+{
+ /* "immutable" means that "storageFlags" was set by the user and "usage"
+ * was guessed by Mesa. Otherwise, "usage" was set by the user and
+ * storageFlags was guessed by Mesa.
+ *
+ * Therefore, use storageFlags with immutable, else use "usage".
+ */
+ if (immutable) {
+ /* BufferStorage */
+ if (storageFlags & GL_MAP_READ_BIT)
+ return PIPE_USAGE_STAGING;
+ else if (storageFlags & GL_CLIENT_STORAGE_BIT)
+ return PIPE_USAGE_STREAM;
+ else
+ return PIPE_USAGE_DEFAULT;
+ }
+ else {
+ /* These are often read by the CPU, so enable CPU caches. */
+ if (target == GL_PIXEL_PACK_BUFFER ||
+ target == GL_PIXEL_UNPACK_BUFFER)
+ return PIPE_USAGE_STAGING;
+
+ /* BufferData */
+ switch (usage) {
+ case GL_DYNAMIC_DRAW:
+ case GL_DYNAMIC_COPY:
+ return PIPE_USAGE_DYNAMIC;
+ case GL_STREAM_DRAW:
+ case GL_STREAM_COPY:
+ return PIPE_USAGE_STREAM;
+ case GL_STATIC_READ:
+ case GL_DYNAMIC_READ:
+ case GL_STREAM_READ:
+ return PIPE_USAGE_STAGING;
+ case GL_STATIC_DRAW:
+ case GL_STATIC_COPY:
+ default:
+ return PIPE_USAGE_DEFAULT;
+ }
+ }
+}
+
+
+static ALWAYS_INLINE GLboolean
+bufferobj_data(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const void *data,
+ struct gl_memory_object *memObj,
+ GLuint64 offset,
+ GLenum usage,
+ GLbitfield storageFlags,
+ struct gl_buffer_object *obj)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_screen *screen = pipe->screen;
+ struct st_memory_object *st_mem_obj = st_memory_object(memObj);
+ bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
+
+ if (size > UINT32_MAX || offset > UINT32_MAX) {
+ /* pipe_resource.width0 is 32 bits only and increasing it
+ * to 64 bits doesn't make much sense since hw support
+ * for > 4GB resources is limited.
+ */
+ obj->Size = 0;
+ return GL_FALSE;
+ }
+
+ if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
+ size && obj->buffer &&
+ obj->Size == size &&
+ obj->Usage == usage &&
+ obj->StorageFlags == storageFlags) {
+ if (data) {
+ /* Just discard the old contents and write new data.
+ * This should be the same as creating a new buffer, but we avoid
+ * a lot of validation in Mesa.
+ *
+ * If the buffer is mapped, we can't discard it.
+ *
+ * PIPE_MAP_DIRECTLY suppresses implicit buffer range
+ * invalidation.
+ */
+ pipe->buffer_subdata(pipe, obj->buffer,
+ is_mapped ? PIPE_MAP_DIRECTLY :
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE,
+ 0, size, data);
+ return GL_TRUE;
+ } else if (is_mapped) {
+ return GL_TRUE; /* can't reallocate, nothing to do */
+ } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
+ pipe->invalidate_resource(pipe, obj->buffer);
+ return GL_TRUE;
+ }
+ }
+
+ obj->Size = size;
+ obj->Usage = usage;
+ obj->StorageFlags = storageFlags;
+
+ mesa_buffer_object_release_buffer(obj);
+
+ unsigned bindings = buffer_target_to_bind_flags(target);
+
+ if (storageFlags & MESA_GALLIUM_VERTEX_STATE_STORAGE)
+ bindings |= PIPE_BIND_VERTEX_STATE;
+
+ if (ST_DEBUG & DEBUG_BUFFER) {
+ debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
+ (int64_t) size, bindings);
+ }
+
+ if (size != 0) {
+ struct pipe_resource buffer;
+
+ memset(&buffer, 0, sizeof buffer);
+ buffer.target = PIPE_BUFFER;
+ buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
+ buffer.bind = bindings;
+ buffer.usage =
+ buffer_usage(target, obj->Immutable, storageFlags, usage);
+ buffer.flags = storage_flags_to_buffer_flags(storageFlags);
+ buffer.width0 = size;
+ buffer.height0 = 1;
+ buffer.depth0 = 1;
+ buffer.array_size = 1;
+
+ if (st_mem_obj) {
+ obj->buffer = screen->resource_from_memobj(screen, &buffer,
+ st_mem_obj->memory,
+ offset);
+ }
+ else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
+ obj->buffer =
+ screen->resource_from_user_memory(screen, &buffer, (void*)data);
+ }
+ else {
+ obj->buffer = screen->resource_create(screen, &buffer);
+
+ if (obj->buffer && data)
+ pipe_buffer_write(pipe, obj->buffer, 0, size, data);
+ }
+
+ if (!obj->buffer) {
+ /* out of memory */
+ obj->Size = 0;
+ return GL_FALSE;
+ }
+
+ obj->private_refcount_ctx = ctx;
+ }
+
+ /* The current buffer may be bound, so we have to revalidate all atoms that
+ * might be using it.
+ */
+ if (obj->UsageHistory & USAGE_ARRAY_BUFFER)
+ ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
+ if (obj->UsageHistory & USAGE_UNIFORM_BUFFER)
+ ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
+ if (obj->UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
+ ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
+ if (obj->UsageHistory & USAGE_TEXTURE_BUFFER)
+ ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
+ if (obj->UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
+ ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;
+
+ return GL_TRUE;
+}
+
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via ctx->Driver.BufferData().
+ * \return GL_TRUE for success, GL_FALSE if out of memory
+ */
+GLboolean
+st_bufferobj_data(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const void *data,
+ GLenum usage,
+ GLbitfield storageFlags,
+ struct gl_buffer_object *obj)
+{
+ return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
+}
+
+GLboolean
+st_bufferobj_data_mem(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ struct gl_memory_object *memObj,
+ GLuint64 offset,
+ GLenum usage,
+ struct gl_buffer_object *bufObj)
+{
+ return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0, bufObj);
+}
+
+/**
+ * Convert GLbitfield of GL_MAP_x flags to gallium pipe_map_flags flags.
+ * \param wholeBuffer is the whole buffer being mapped?
+ */
+enum pipe_map_flags
+st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
+{
+ enum pipe_map_flags flags = 0;
+
+ if (access & GL_MAP_WRITE_BIT)
+ flags |= PIPE_MAP_WRITE;
+
+ if (access & GL_MAP_READ_BIT)
+ flags |= PIPE_MAP_READ;
+
+ if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
+ flags |= PIPE_MAP_FLUSH_EXPLICIT;
+
+ if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
+ flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
+ }
+ else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
+ if (wholeBuffer)
+ flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
+ else
+ flags |= PIPE_MAP_DISCARD_RANGE;
+ }
+
+ if (access & GL_MAP_UNSYNCHRONIZED_BIT)
+ flags |= PIPE_MAP_UNSYNCHRONIZED;
+
+ if (access & GL_MAP_PERSISTENT_BIT)
+ flags |= PIPE_MAP_PERSISTENT;
+
+ if (access & GL_MAP_COHERENT_BIT)
+ flags |= PIPE_MAP_COHERENT;
+
+ /* ... other flags ...
+ */
+
+ if (access & MESA_MAP_NOWAIT_BIT)
+ flags |= PIPE_MAP_DONTBLOCK;
+ if (access & MESA_MAP_THREAD_SAFE_BIT)
+ flags |= PIPE_MAP_THREAD_SAFE;
+ if (access & MESA_MAP_ONCE)
+ flags |= PIPE_MAP_ONCE;
+
+ return flags;
+}
+
+
+/**
+ * Called via glMapBufferRange().
+ */
+void *
+st_bufferobj_map_range(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length, GLbitfield access,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
+{
+ struct pipe_context *pipe = ctx->pipe;
+
+ assert(offset >= 0);
+ assert(length >= 0);
+ assert(offset < obj->Size);
+ assert(offset + length <= obj->Size);
+
+ enum pipe_map_flags transfer_flags =
+ st_access_flags_to_transfer_flags(access,
+ offset == 0 && length == obj->Size);
+
+ /* Sometimes games do silly things like MapBufferRange(UNSYNC|DISCARD_RANGE)
+ * In this case, the UNSYNC is a bit redundant, but the games rely
+ * on the driver rebinding/replacing the backing storage rather than
+ * going down the UNSYNC path (ie. honoring DISCARD_x first before UNSYNC).
+ */
+ if (unlikely(ctx->st_opts->ignore_map_unsynchronized)) {
+ if (transfer_flags & (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE))
+ transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
+ }
+
+ obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
+ obj->buffer,
+ offset, length,
+ transfer_flags,
+ &obj->transfer[index]);
+ if (obj->Mappings[index].Pointer) {
+ obj->Mappings[index].Offset = offset;
+ obj->Mappings[index].Length = length;
+ obj->Mappings[index].AccessFlags = access;
+ }
+ else {
+ obj->transfer[index] = NULL;
+ }
+
+ return obj->Mappings[index].Pointer;
+}
+
+
+void
+st_bufferobj_flush_mapped_range(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
+{
+ struct pipe_context *pipe = ctx->pipe;
+
+ /* Subrange is relative to mapped range */
+ assert(offset >= 0);
+ assert(length >= 0);
+ assert(offset + length <= obj->Mappings[index].Length);
+ assert(obj->Mappings[index].Pointer);
+
+ if (!length)
+ return;
+
+ pipe_buffer_flush_mapped_range(pipe, obj->transfer[index],
+ obj->Mappings[index].Offset + offset,
+ length);
+}
+
+
+/**
+ * Called via glUnmapBufferARB().
+ */
+GLboolean
+st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index)
+{
+ struct pipe_context *pipe = ctx->pipe;
+
+ if (obj->Mappings[index].Length)
+ pipe_buffer_unmap(pipe, obj->transfer[index]);
+
+ obj->transfer[index] = NULL;
+ obj->Mappings[index].Pointer = NULL;
+ obj->Mappings[index].Offset = 0;
+ obj->Mappings[index].Length = 0;
+ return GL_TRUE;
+}
+
+
+/**
+ * Called via glCopyBufferSubData().
+ */
+void
+st_copy_buffer_subdata(struct gl_context *ctx,
+ struct gl_buffer_object *src,
+ struct gl_buffer_object *dst,
+ GLintptr readOffset, GLintptr writeOffset,
+ GLsizeiptr size)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_box box;
+
+ if (!size)
+ return;
+
+ /* buffer should not already be mapped */
+ assert(!_mesa_check_disallowed_mapping(src));
+ /* dst can be mapped, just not the same range as the target range */
+
+ u_box_1d(readOffset, size, &box);
+
+ pipe->resource_copy_region(pipe, dst->buffer, 0, writeOffset, 0, 0,
+ src->buffer, 0, &box);
+}
+
+/**
+ * Called via glClearBufferSubData().
+ */
+void
+st_clear_buffer_subdata(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr size,
+ const void *clearValue,
+ GLsizeiptr clearValueSize,
+ struct gl_buffer_object *bufObj)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ static const char zeros[16] = {0};
+
+ if (!pipe->clear_buffer) {
+ _mesa_ClearBufferSubData_sw(ctx, offset, size,
+ clearValue, clearValueSize, bufObj);
+ return;
+ }
+
+ if (!clearValue)
+ clearValue = zeros;
+
+ pipe->clear_buffer(pipe, bufObj->buffer, offset, size,
+ clearValue, clearValueSize);
+}
+
+void
+st_bufferobj_page_commitment(struct gl_context *ctx,
+ struct gl_buffer_object *bufferObj,
+ GLintptr offset, GLsizeiptr size,
+ GLboolean commit)
+{
+ struct pipe_context *pipe = ctx->pipe;
+ struct pipe_box box;
+
+ u_box_1d(offset, size, &box);
+
+ if (!pipe->resource_commit(pipe, bufferObj->buffer, 0, &box, commit)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
+ return;
+ }
+}
+
/**
* Helper to warn of possible performance issues, such as frequently
* updating a buffer created with GL_STATIC_DRAW. Called via the macro
diff --git a/src/mesa/main/bufferobj.h b/src/mesa/main/bufferobj.h
index 23d7d26789e..f7b417372fe 100644
--- a/src/mesa/main/bufferobj.h
+++ b/src/mesa/main/bufferobj.h
@@ -68,6 +68,55 @@ _mesa_get_buffer_object_reference(struct gl_context *ctx, struct gl_buffer_objec
return buffer;
}
+void st_bufferobj_subdata(struct gl_context *ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const void * data, struct gl_buffer_object *obj);
+void st_bufferobj_get_subdata(struct gl_context *ctx,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ void * data, struct gl_buffer_object *obj);
+GLboolean st_bufferobj_data(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const void *data,
+ GLenum usage,
+ GLbitfield storageFlags,
+ struct gl_buffer_object *obj);
+GLboolean st_bufferobj_data_mem(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ struct gl_memory_object *memObj,
+ GLuint64 offset,
+ GLenum usage,
+ struct gl_buffer_object *bufObj);
+void *st_bufferobj_map_range(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length,
+ GLbitfield access,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
+
+void st_bufferobj_flush_mapped_range(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
+GLboolean st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
+void st_copy_buffer_subdata(struct gl_context *ctx,
+ struct gl_buffer_object *src,
+ struct gl_buffer_object *dst,
+ GLintptr readOffset, GLintptr writeOffset,
+ GLsizeiptr size);
+void st_clear_buffer_subdata(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr size,
+ const void *clearValue,
+ GLsizeiptr clearValueSize,
+ struct gl_buffer_object *bufObj);
+void st_bufferobj_page_commitment(struct gl_context *ctx,
+ struct gl_buffer_object *bufferObj,
+ GLintptr offset, GLsizeiptr size,
+ GLboolean commit);
+
struct gl_buffer_object *
_mesa_internal_buffer_object_alloc(struct gl_context *ctx, GLuint id);
void
diff --git a/src/mesa/state_tracker/st_cb_bufferobjects.c b/src/mesa/state_tracker/st_cb_bufferobjects.c
index aa08c537e09..8af8e8d8a9d 100644
--- a/src/mesa/state_tracker/st_cb_bufferobjects.c
+++ b/src/mesa/state_tracker/st_cb_bufferobjects.c
@@ -50,344 +50,6 @@
#include "util/u_inlines.h"
-/**
- * Replace data in a subrange of buffer object. If the data range
- * specified by size + offset extends beyond the end of the buffer or
- * if data is NULL, no copy is performed.
- * Called via glBufferSubDataARB().
- */
-void
-st_bufferobj_subdata(struct gl_context *ctx,
- GLintptrARB offset,
- GLsizeiptrARB size,
- const void * data, struct gl_buffer_object *obj)
-{
- /* we may be called from VBO code, so double-check params here */
- assert(offset >= 0);
- assert(size >= 0);
- assert(offset + size <= obj->Size);
-
- if (!size)
- return;
-
- /*
- * According to ARB_vertex_buffer_object specification, if data is null,
- * then the contents of the buffer object's data store is undefined. We just
- * ignore, and leave it unchanged.
- */
- if (!data)
- return;
-
- if (!obj->buffer) {
- /* we probably ran out of memory during buffer allocation */
- return;
- }
-
- /* Now that transfers are per-context, we don't have to figure out
- * flushing here. Usually drivers won't need to flush in this case
- * even if the buffer is currently referenced by hardware - they
- * just queue the upload as dma rather than mapping the underlying
- * buffer directly.
- *
- * If the buffer is mapped, suppress implicit buffer range invalidation
- * by using PIPE_MAP_DIRECTLY.
- */
- struct pipe_context *pipe = st_context(ctx)->pipe;
-
- pipe->buffer_subdata(pipe, obj->buffer,
- _mesa_bufferobj_mapped(obj, MAP_USER) ?
- PIPE_MAP_DIRECTLY : 0,
- offset, size, data);
-}
-
-
-/**
- * Called via glGetBufferSubDataARB().
- */
-void
-st_bufferobj_get_subdata(struct gl_context *ctx,
- GLintptrARB offset,
- GLsizeiptrARB size,
- void * data, struct gl_buffer_object *obj)
-{
- /* we may be called from VBO code, so double-check params here */
- assert(offset >= 0);
- assert(size >= 0);
- assert(offset + size <= obj->Size);
-
- if (!size)
- return;
-
- if (!obj->buffer) {
- /* we probably ran out of memory during buffer allocation */
- return;
- }
-
- pipe_buffer_read(st_context(ctx)->pipe, obj->buffer,
- offset, size, data);
-}
-
-
-/**
- * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target.
- */
-static unsigned
-buffer_target_to_bind_flags(GLenum target)
-{
- switch (target) {
- case GL_PIXEL_PACK_BUFFER_ARB:
- case GL_PIXEL_UNPACK_BUFFER_ARB:
- return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
- case GL_ARRAY_BUFFER_ARB:
- return PIPE_BIND_VERTEX_BUFFER;
- case GL_ELEMENT_ARRAY_BUFFER_ARB:
- return PIPE_BIND_INDEX_BUFFER;
- case GL_TEXTURE_BUFFER:
- return PIPE_BIND_SAMPLER_VIEW;
- case GL_TRANSFORM_FEEDBACK_BUFFER:
- return PIPE_BIND_STREAM_OUTPUT;
- case GL_UNIFORM_BUFFER:
- return PIPE_BIND_CONSTANT_BUFFER;
- case GL_DRAW_INDIRECT_BUFFER:
- case GL_PARAMETER_BUFFER_ARB:
- return PIPE_BIND_COMMAND_ARGS_BUFFER;
- case GL_ATOMIC_COUNTER_BUFFER:
- case GL_SHADER_STORAGE_BUFFER:
- return PIPE_BIND_SHADER_BUFFER;
- case GL_QUERY_BUFFER:
- return PIPE_BIND_QUERY_BUFFER;
- default:
- return 0;
- }
-}
-
-
-/**
- * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
- */
-static unsigned
-storage_flags_to_buffer_flags(GLbitfield storageFlags)
-{
- unsigned flags = 0;
- if (storageFlags & GL_MAP_PERSISTENT_BIT)
- flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
- if (storageFlags & GL_MAP_COHERENT_BIT)
- flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
- if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
- flags |= PIPE_RESOURCE_FLAG_SPARSE;
- return flags;
-}
-
-
-/**
- * From a buffer object's target, immutability flag, storage flags and
- * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
- * STREAM, etc).
- */
-static enum pipe_resource_usage
-buffer_usage(GLenum target, GLboolean immutable,
- GLbitfield storageFlags, GLenum usage)
-{
- /* "immutable" means that "storageFlags" was set by the user and "usage"
- * was guessed by Mesa. Otherwise, "usage" was set by the user and
- * storageFlags was guessed by Mesa.
- *
- * Therefore, use storageFlags with immutable, else use "usage".
- */
- if (immutable) {
- /* BufferStorage */
- if (storageFlags & GL_MAP_READ_BIT)
- return PIPE_USAGE_STAGING;
- else if (storageFlags & GL_CLIENT_STORAGE_BIT)
- return PIPE_USAGE_STREAM;
- else
- return PIPE_USAGE_DEFAULT;
- }
- else {
- /* These are often read by the CPU, so enable CPU caches. */
- if (target == GL_PIXEL_PACK_BUFFER ||
- target == GL_PIXEL_UNPACK_BUFFER)
- return PIPE_USAGE_STAGING;
-
- /* BufferData */
- switch (usage) {
- case GL_DYNAMIC_DRAW:
- case GL_DYNAMIC_COPY:
- return PIPE_USAGE_DYNAMIC;
- case GL_STREAM_DRAW:
- case GL_STREAM_COPY:
- return PIPE_USAGE_STREAM;
- case GL_STATIC_READ:
- case GL_DYNAMIC_READ:
- case GL_STREAM_READ:
- return PIPE_USAGE_STAGING;
- case GL_STATIC_DRAW:
- case GL_STATIC_COPY:
- default:
- return PIPE_USAGE_DEFAULT;
- }
- }
-}
-
-
-static ALWAYS_INLINE GLboolean
-bufferobj_data(struct gl_context *ctx,
- GLenum target,
- GLsizeiptrARB size,
- const void *data,
- struct gl_memory_object *memObj,
- GLuint64 offset,
- GLenum usage,
- GLbitfield storageFlags,
- struct gl_buffer_object *obj)
-{
- struct st_context *st = st_context(ctx);
- struct pipe_context *pipe = st->pipe;
- struct pipe_screen *screen = st->screen;
- struct st_memory_object *st_mem_obj = st_memory_object(memObj);
- bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);
-
- if (size > UINT32_MAX || offset > UINT32_MAX) {
- /* pipe_resource.width0 is 32 bits only and increasing it
- * to 64 bits doesn't make much sense since hw support
- * for > 4GB resources is limited.
- */
- obj->Size = 0;
- return GL_FALSE;
- }
-
- if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
- size && obj->buffer &&
- obj->Size == size &&
- obj->Usage == usage &&
- obj->StorageFlags == storageFlags) {
- if (data) {
- /* Just discard the old contents and write new data.
- * This should be the same as creating a new buffer, but we avoid
- * a lot of validation in Mesa.
- *
- * If the buffer is mapped, we can't discard it.
- *
- * PIPE_MAP_DIRECTLY supresses implicit buffer range
- * invalidation.
- */
- pipe->buffer_subdata(pipe, obj->buffer,
- is_mapped ? PIPE_MAP_DIRECTLY :
- PIPE_MAP_DISCARD_WHOLE_RESOURCE,
- 0, size, data);
- return GL_TRUE;
- } else if (is_mapped) {
- return GL_TRUE; /* can't reallocate, nothing to do */
- } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
- pipe->invalidate_resource(pipe, obj->buffer);
- return GL_TRUE;
- }
- }
-
- obj->Size = size;
- obj->Usage = usage;
- obj->StorageFlags = storageFlags;
-
- mesa_buffer_object_release_buffer(obj);
-
- unsigned bindings = buffer_target_to_bind_flags(target);
-
- if (storageFlags & MESA_GALLIUM_VERTEX_STATE_STORAGE)
- bindings |= PIPE_BIND_VERTEX_STATE;
-
- if (ST_DEBUG & DEBUG_BUFFER) {
- debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
- (int64_t) size, bindings);
- }
-
- if (size != 0) {
- struct pipe_resource buffer;
-
- memset(&buffer, 0, sizeof buffer);
- buffer.target = PIPE_BUFFER;
- buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
- buffer.bind = bindings;
- buffer.usage =
- buffer_usage(target, obj->Immutable, storageFlags, usage);
- buffer.flags = storage_flags_to_buffer_flags(storageFlags);
- buffer.width0 = size;
- buffer.height0 = 1;
- buffer.depth0 = 1;
- buffer.array_size = 1;
-
- if (st_mem_obj) {
- obj->buffer = screen->resource_from_memobj(screen, &buffer,
- st_mem_obj->memory,
- offset);
- }
- else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
- obj->buffer =
- screen->resource_from_user_memory(screen, &buffer, (void*)data);
- }
- else {
- obj->buffer = screen->resource_create(screen, &buffer);
-
- if (obj->buffer && data)
- pipe_buffer_write(pipe, obj->buffer, 0, size, data);
- }
-
- if (!obj->buffer) {
- /* out of memory */
- obj->Size = 0;
- return GL_FALSE;
- }
-
- obj->private_refcount_ctx = ctx;
- }
-
- /* The current buffer may be bound, so we have to revalidate all atoms that
- * might be using it.
- */
- if (obj->UsageHistory & USAGE_ARRAY_BUFFER)
- ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
- if (obj->UsageHistory & USAGE_UNIFORM_BUFFER)
- ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
- if (obj->UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
- ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
- if (obj->UsageHistory & USAGE_TEXTURE_BUFFER)
- ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
- if (obj->UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
- ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;
-
- return GL_TRUE;
-}
-
-/**
- * Allocate space for and store data in a buffer object. Any data that was
- * previously stored in the buffer object is lost. If data is NULL,
- * memory will be allocated, but no copy will occur.
- * Called via ctx->Driver.BufferData().
- * \return GL_TRUE for success, GL_FALSE if out of memory
- */
-GLboolean
-st_bufferobj_data(struct gl_context *ctx,
- GLenum target,
- GLsizeiptrARB size,
- const void *data,
- GLenum usage,
- GLbitfield storageFlags,
- struct gl_buffer_object *obj)
-{
- return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj);
-}
-
-GLboolean
-st_bufferobj_data_mem(struct gl_context *ctx,
- GLenum target,
- GLsizeiptrARB size,
- struct gl_memory_object *memObj,
- GLuint64 offset,
- GLenum usage,
- struct gl_buffer_object *bufObj)
-{
- return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0, bufObj);
-}
-
/**
* Called via glInvalidateBuffer(Sub)Data.
*/
@@ -397,8 +59,7 @@ st_bufferobj_invalidate(struct gl_context *ctx,
GLintptr offset,
GLsizeiptr size)
{
- struct st_context *st = st_context(ctx);
- struct pipe_context *pipe = st->pipe;
+ struct pipe_context *pipe = ctx->pipe;
/* We ignore partial invalidates. */
if (offset != 0 || size != obj->Size)
@@ -411,218 +72,6 @@ st_bufferobj_invalidate(struct gl_context *ctx,
pipe->invalidate_resource(pipe, obj->buffer);
}
-
-/**
- * Convert GLbitfield of GL_MAP_x flags to gallium pipe_map_flags flags.
- * \param wholeBuffer is the whole buffer being mapped?
- */
-enum pipe_map_flags
-st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
-{
- enum pipe_map_flags flags = 0;
-
- if (access & GL_MAP_WRITE_BIT)
- flags |= PIPE_MAP_WRITE;
-
- if (access & GL_MAP_READ_BIT)
- flags |= PIPE_MAP_READ;
-
- if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
- flags |= PIPE_MAP_FLUSH_EXPLICIT;
-
- if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
- flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
- }
- else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
- if (wholeBuffer)
- flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
- else
- flags |= PIPE_MAP_DISCARD_RANGE;
- }
-
- if (access & GL_MAP_UNSYNCHRONIZED_BIT)
- flags |= PIPE_MAP_UNSYNCHRONIZED;
-
- if (access & GL_MAP_PERSISTENT_BIT)
- flags |= PIPE_MAP_PERSISTENT;
-
- if (access & GL_MAP_COHERENT_BIT)
- flags |= PIPE_MAP_COHERENT;
-
- /* ... other flags ...
- */
-
- if (access & MESA_MAP_NOWAIT_BIT)
- flags |= PIPE_MAP_DONTBLOCK;
- if (access & MESA_MAP_THREAD_SAFE_BIT)
- flags |= PIPE_MAP_THREAD_SAFE;
- if (access & MESA_MAP_ONCE)
- flags |= PIPE_MAP_ONCE;
-
- return flags;
-}
-
-
-/**
- * Called via glMapBufferRange().
- */
-void *
-st_bufferobj_map_range(struct gl_context *ctx,
- GLintptr offset, GLsizeiptr length, GLbitfield access,
- struct gl_buffer_object *obj,
- gl_map_buffer_index index)
-{
- struct pipe_context *pipe = st_context(ctx)->pipe;
-
- assert(offset >= 0);
- assert(length >= 0);
- assert(offset < obj->Size);
- assert(offset + length <= obj->Size);
-
- enum pipe_map_flags transfer_flags =
- st_access_flags_to_transfer_flags(access,
- offset == 0 && length == obj->Size);
-
- /* Sometimes games do silly things like MapBufferRange(UNSYNC|DISCARD_RANGE)
- * In this case, the the UNSYNC is a bit redundant, but the games rely
- * on the driver rebinding/replacing the backing storage rather than
- * going down the UNSYNC path (ie. honoring DISCARD_x first before UNSYNC).
- */
- if (unlikely(st_context(ctx)->options.ignore_map_unsynchronized)) {
- if (transfer_flags & (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE))
- transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
- }
-
- obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
- obj->buffer,
- offset, length,
- transfer_flags,
- &obj->transfer[index]);
- if (obj->Mappings[index].Pointer) {
- obj->Mappings[index].Offset = offset;
- obj->Mappings[index].Length = length;
- obj->Mappings[index].AccessFlags = access;
- }
- else {
- obj->transfer[index] = NULL;
- }
-
- return obj->Mappings[index].Pointer;
-}
-
-
-void
-st_bufferobj_flush_mapped_range(struct gl_context *ctx,
- GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj,
- gl_map_buffer_index index)
-{
- struct pipe_context *pipe = st_context(ctx)->pipe;
-
- /* Subrange is relative to mapped range */
- assert(offset >= 0);
- assert(length >= 0);
- assert(offset + length <= obj->Mappings[index].Length);
- assert(obj->Mappings[index].Pointer);
-
- if (!length)
- return;
-
- pipe_buffer_flush_mapped_range(pipe, obj->transfer[index],
- obj->Mappings[index].Offset + offset,
- length);
-}
-
-
-/**
- * Called via glUnmapBufferARB().
- */
-GLboolean
-st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
- gl_map_buffer_index index)
-{
- struct pipe_context *pipe = st_context(ctx)->pipe;
-
- if (obj->Mappings[index].Length)
- pipe_buffer_unmap(pipe, obj->transfer[index]);
-
- obj->transfer[index] = NULL;
- obj->Mappings[index].Pointer = NULL;
- obj->Mappings[index].Offset = 0;
- obj->Mappings[index].Length = 0;
- return GL_TRUE;
-}
-
-
-/**
- * Called via glCopyBufferSubData().
- */
-void
-st_copy_buffer_subdata(struct gl_context *ctx,
- struct gl_buffer_object *src,
- struct gl_buffer_object *dst,
- GLintptr readOffset, GLintptr writeOffset,
- GLsizeiptr size)
-{
- struct pipe_context *pipe = st_context(ctx)->pipe;
- struct pipe_box box;
-
- if (!size)
- return;
-
- /* buffer should not already be mapped */
- assert(!_mesa_check_disallowed_mapping(src));
- /* dst can be mapped, just not the same range as the target range */
-
- u_box_1d(readOffset, size, &box);
-
- pipe->resource_copy_region(pipe, dst->buffer, 0, writeOffset, 0, 0,
- src->buffer, 0, &box);
-}
-
-/**
- * Called via glClearBufferSubData().
- */
-void
-st_clear_buffer_subdata(struct gl_context *ctx,
- GLintptr offset, GLsizeiptr size,
- const void *clearValue,
- GLsizeiptr clearValueSize,
- struct gl_buffer_object *bufObj)
-{
- struct pipe_context *pipe = st_context(ctx)->pipe;
- static const char zeros[16] = {0};
-
- if (!pipe->clear_buffer) {
- _mesa_ClearBufferSubData_sw(ctx, offset, size,
- clearValue, clearValueSize, bufObj);
- return;
- }
-
- if (!clearValue)
- clearValue = zeros;
-
- pipe->clear_buffer(pipe, bufObj->buffer, offset, size,
- clearValue, clearValueSize);
-}
-
-void
-st_bufferobj_page_commitment(struct gl_context *ctx,
- struct gl_buffer_object *bufferObj,
- GLintptr offset, GLsizeiptr size,
- GLboolean commit)
-{
- struct pipe_context *pipe = st_context(ctx)->pipe;
- struct pipe_box box;
-
- u_box_1d(offset, size, &box);
-
- if (!pipe->resource_commit(pipe, bufferObj->buffer, 0, &box, commit)) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)");
- return;
- }
-}
-
void
st_init_bufferobject_functions(struct pipe_screen *screen,
struct dd_function_table *functions)
diff --git a/src/mesa/state_tracker/st_cb_bufferobjects.h b/src/mesa/state_tracker/st_cb_bufferobjects.h
index 004a7ee8db2..3c39fd04f26 100644
--- a/src/mesa/state_tracker/st_cb_bufferobjects.h
+++ b/src/mesa/state_tracker/st_cb_bufferobjects.h
@@ -43,53 +43,4 @@ extern void
st_init_bufferobject_functions(struct pipe_screen *screen,
struct dd_function_table *functions);
-void st_bufferobj_subdata(struct gl_context *ctx,
- GLintptrARB offset,
- GLsizeiptrARB size,
- const void * data, struct gl_buffer_object *obj);
-void st_bufferobj_get_subdata(struct gl_context *ctx,
- GLintptrARB offset,
- GLsizeiptrARB size,
- void * data, struct gl_buffer_object *obj);
-GLboolean st_bufferobj_data(struct gl_context *ctx,
- GLenum target,
- GLsizeiptrARB size,
- const void *data,
- GLenum usage,
- GLbitfield storageFlags,
- struct gl_buffer_object *obj);
-GLboolean st_bufferobj_data_mem(struct gl_context *ctx,
- GLenum target,
- GLsizeiptrARB size,
- struct gl_memory_object *memObj,
- GLuint64 offset,
- GLenum usage,
- struct gl_buffer_object *bufObj);
-void *st_bufferobj_map_range(struct gl_context *ctx,
- GLintptr offset, GLsizeiptr length,
- GLbitfield access,
- struct gl_buffer_object *obj,
- gl_map_buffer_index index);
-
-void st_bufferobj_flush_mapped_range(struct gl_context *ctx,
- GLintptr offset, GLsizeiptr length,
- struct gl_buffer_object *obj,
- gl_map_buffer_index index);
-GLboolean st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
- gl_map_buffer_index index);
-void st_copy_buffer_subdata(struct gl_context *ctx,
- struct gl_buffer_object *src,
- struct gl_buffer_object *dst,
- GLintptr readOffset, GLintptr writeOffset,
- GLsizeiptr size);
-void st_clear_buffer_subdata(struct gl_context *ctx,
- GLintptr offset, GLsizeiptr size,
- const void *clearValue,
- GLsizeiptr clearValueSize,
- struct gl_buffer_object *bufObj);
-void st_bufferobj_page_commitment(struct gl_context *ctx,
- struct gl_buffer_object *bufferObj,
- GLintptr offset, GLsizeiptr size,
- GLboolean commit);
-
#endif
More information about the mesa-commit
mailing list