[Mesa-dev] [PATCH 3/4] gallium: remove pipe_index_buffer and set_index_buffer
Marek Olšák
maraeo at gmail.com
Fri Apr 28 23:12:08 UTC 2017
From: Marek Olšák <marek.olsak at amd.com>
pipe_draw_info::indexed is replaced with index_size. index_size == 0 means
non-indexed.
Instead of pipe_index_buffer::offset, pipe_draw_info::start is used.
For indexed indirect draws, pipe_draw_info::start is added to the indirect
start. This is the only case when "start" affects indirect draws.
pipe_draw_info::index is a union. Use either index::resource or
index::user depending on the value of pipe_draw_info::has_user_indices.
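As an illustration only (not part of this patch), here is a minimal sketch of how
the reworked interface is meant to be used. The helper name and the concrete
values are made up; the pipe_draw_info fields are the ones introduced or kept here,
and util_draw_init_info comes from util/u_draw.h:

   #include "pipe/p_context.h"
   #include "pipe/p_state.h"
   #include "util/u_draw.h"

   /* Hypothetical example: one indexed draw with the new pipe_draw_info.
    * index_size != 0 selects an indexed draw; has_user_indices decides
    * whether index.resource (GPU buffer) or index.user (CPU pointer) is
    * the valid member of the union.
    */
   static void
   draw_indexed_example(struct pipe_context *pipe,
                        struct pipe_resource *index_buf)
   {
      struct pipe_draw_info info;

      util_draw_init_info(&info);
      info.mode = PIPE_PRIM_TRIANGLES;
      info.index_size = 2;             /* 16-bit indices; 0 = non-indexed */
      info.has_user_indices = false;   /* use index.resource, not index.user */
      info.index.resource = index_buf; /* replaces pipe_index_buffer::buffer */
      info.start = 0;                  /* in indices; replaces ib::offset */
      info.count = 36;
      info.min_index = 0;
      info.max_index = 23;

      pipe->draw_vbo(pipe, &info);
   }

A user-pointer draw would instead set has_user_indices = true and point
index.user at the application's index array.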
Performance numbers with the drawoverhead microbenchmark:
Before:
DrawElements only: 4.5 million draws/second
DrawElements w/ nop state change: 4.4 million draws/sec (overhead: 0.000007 ms/draw)
DrawElements w/ state change: 1.9 million draws/sec (overhead: 0.000293 ms/draw)
After:
DrawElements only: 5.0 million draws/second
DrawElements w/ nop state change: 4.9 million draws/sec (overhead: 0.000007 ms/draw)
DrawElements w/ state change: 2.2 million draws/sec (overhead: 0.000254 ms/draw)
The improvement will be much smaller with real apps.
---
src/gallium/auxiliary/cso_cache/cso_context.c | 16 ---
src/gallium/auxiliary/cso_cache/cso_context.h | 4 -
src/gallium/auxiliary/draw/draw_context.c | 3 -
src/gallium/auxiliary/draw/draw_pt.c | 6 +-
src/gallium/auxiliary/indices/u_primconvert.c | 56 +++------
src/gallium/auxiliary/indices/u_primconvert.h | 2 -
src/gallium/auxiliary/util/u_draw.c | 6 +-
src/gallium/auxiliary/util/u_draw.h | 9 +-
src/gallium/auxiliary/util/u_dump.h | 3 -
src/gallium/auxiliary/util/u_dump_state.c | 23 +---
src/gallium/auxiliary/util/u_helpers.c | 44 ++-----
src/gallium/auxiliary/util/u_helpers.h | 11 +-
src/gallium/auxiliary/util/u_index_modify.c | 24 ++--
src/gallium/auxiliary/util/u_index_modify.h | 7 +-
src/gallium/auxiliary/util/u_prim_restart.c | 52 ++++----
src/gallium/auxiliary/util/u_prim_restart.h | 9 +-
src/gallium/auxiliary/util/u_vbuf.c | 135 ++++++++-------------
src/gallium/auxiliary/util/u_vbuf.h | 2 -
src/gallium/docs/source/context.rst | 6 +-
src/gallium/drivers/ddebug/dd_context.c | 12 --
src/gallium/drivers/ddebug/dd_draw.c | 29 +++--
src/gallium/drivers/ddebug/dd_pipe.h | 1 -
src/gallium/drivers/etnaviv/etnaviv_context.c | 29 +++--
src/gallium/drivers/etnaviv/etnaviv_context.h | 1 -
src/gallium/drivers/etnaviv/etnaviv_emit.c | 3 +-
src/gallium/drivers/etnaviv/etnaviv_state.c | 29 -----
src/gallium/drivers/freedreno/a2xx/fd2_draw.c | 3 +-
src/gallium/drivers/freedreno/a3xx/fd3_draw.c | 5 +-
src/gallium/drivers/freedreno/a3xx/fd3_emit.c | 2 +-
src/gallium/drivers/freedreno/a4xx/fd4_draw.c | 13 +-
src/gallium/drivers/freedreno/a4xx/fd4_draw.h | 17 ++-
src/gallium/drivers/freedreno/a4xx/fd4_emit.c | 2 +-
src/gallium/drivers/freedreno/a5xx/fd5_draw.c | 11 +-
src/gallium/drivers/freedreno/a5xx/fd5_draw.h | 17 ++-
src/gallium/drivers/freedreno/freedreno_context.h | 6 +-
src/gallium/drivers/freedreno/freedreno_draw.c | 20 ++-
src/gallium/drivers/freedreno/freedreno_draw.h | 14 +--
src/gallium/drivers/freedreno/freedreno_resource.c | 4 -
src/gallium/drivers/freedreno/freedreno_state.c | 19 ---
src/gallium/drivers/freedreno/ir3/ir3_shader.c | 2 +-
src/gallium/drivers/i915/i915_context.c | 10 +-
src/gallium/drivers/i915/i915_context.h | 1 -
src/gallium/drivers/i915/i915_state.c | 12 --
src/gallium/drivers/llvmpipe/lp_context.h | 1 -
src/gallium/drivers/llvmpipe/lp_draw_arrays.c | 16 +--
src/gallium/drivers/llvmpipe/lp_state_vertex.c | 13 --
src/gallium/drivers/noop/noop_state.c | 6 -
src/gallium/drivers/nouveau/nv30/nv30_context.c | 7 --
src/gallium/drivers/nouveau/nv30/nv30_context.h | 1 -
src/gallium/drivers/nouveau/nv30/nv30_draw.c | 12 +-
src/gallium/drivers/nouveau/nv30/nv30_push.c | 16 +--
src/gallium/drivers/nouveau/nv30/nv30_resource.c | 4 -
src/gallium/drivers/nouveau/nv30/nv30_state.c | 18 ---
src/gallium/drivers/nouveau/nv30/nv30_vbo.c | 31 ++---
src/gallium/drivers/nouveau/nv50/nv50_context.c | 14 ---
src/gallium/drivers/nouveau/nv50/nv50_context.h | 1 -
src/gallium/drivers/nouveau/nv50/nv50_push.c | 12 +-
src/gallium/drivers/nouveau/nv50/nv50_state.c | 24 ----
src/gallium/drivers/nouveau/nv50/nv50_vbo.c | 30 +++--
src/gallium/drivers/nouveau/nvc0/nvc0_context.c | 13 --
src/gallium/drivers/nouveau/nvc0/nvc0_context.h | 3 +-
src/gallium/drivers/nouveau/nvc0/nvc0_state.c | 26 ----
.../drivers/nouveau/nvc0/nvc0_state_validate.c | 3 -
src/gallium/drivers/nouveau/nvc0/nvc0_vbo.c | 71 +++++------
.../drivers/nouveau/nvc0/nvc0_vbo_translate.c | 30 ++---
src/gallium/drivers/r300/r300_context.h | 3 +-
src/gallium/drivers/r300/r300_render.c | 33 +++--
src/gallium/drivers/r300/r300_render_translate.c | 8 +-
src/gallium/drivers/r300/r300_state.c | 33 -----
src/gallium/drivers/r600/r600_pipe.h | 3 -
src/gallium/drivers/r600/r600_state_common.c | 87 +++++--------
src/gallium/drivers/radeonsi/si_pipe.h | 1 -
src/gallium/drivers/radeonsi/si_state.c | 19 ---
src/gallium/drivers/radeonsi/si_state_draw.c | 91 +++++++-------
src/gallium/drivers/rbug/rbug_context.c | 20 ---
src/gallium/drivers/softpipe/sp_context.h | 1 -
src/gallium/drivers/softpipe/sp_draw_arrays.c | 16 +--
src/gallium/drivers/softpipe/sp_state_vertex.c | 14 ---
src/gallium/drivers/svga/svga_context.h | 1 -
src/gallium/drivers/svga/svga_pipe_draw.c | 36 +++---
src/gallium/drivers/svga/svga_pipe_vertex.c | 10 --
src/gallium/drivers/svga/svga_swtnl.h | 3 +-
src/gallium/drivers/svga/svga_swtnl_draw.c | 11 +-
src/gallium/drivers/swr/swr_context.h | 1 -
src/gallium/drivers/swr/swr_draw.cpp | 2 +-
src/gallium/drivers/swr/swr_state.cpp | 42 +++----
src/gallium/drivers/trace/tr_context.c | 19 ---
src/gallium/drivers/trace/tr_dump_state.c | 25 +---
src/gallium/drivers/trace/tr_dump_state.h | 2 -
src/gallium/drivers/vc4/vc4_context.h | 3 +-
src/gallium/drivers/vc4/vc4_draw.c | 20 +--
src/gallium/drivers/vc4/vc4_resource.c | 11 +-
src/gallium/drivers/vc4/vc4_resource.h | 5 +-
src/gallium/drivers/vc4/vc4_state.c | 19 ---
src/gallium/drivers/virgl/virgl_context.c | 39 ++----
src/gallium/drivers/virgl/virgl_context.h | 1 -
src/gallium/drivers/virgl/virgl_encode.c | 6 +-
src/gallium/drivers/virgl/virgl_encode.h | 9 +-
src/gallium/include/pipe/p_context.h | 4 -
src/gallium/include/pipe/p_state.h | 38 +++---
src/gallium/state_trackers/nine/device9.c | 26 ++--
src/gallium/state_trackers/nine/indexbuffer9.c | 15 ++-
src/gallium/state_trackers/nine/indexbuffer9.h | 7 +-
src/gallium/state_trackers/nine/nine_state.c | 49 +++-----
src/gallium/state_trackers/nine/nine_state.h | 8 +-
src/mesa/state_tracker/st_draw.c | 77 +++++-------
src/mesa/state_tracker/st_draw_feedback.c | 20 ++-
107 files changed, 655 insertions(+), 1215 deletions(-)
diff --git a/src/gallium/auxiliary/cso_cache/cso_context.c b/src/gallium/auxiliary/cso_cache/cso_context.c
index 68f7b9e..5558385 100644
--- a/src/gallium/auxiliary/cso_cache/cso_context.c
+++ b/src/gallium/auxiliary/cso_cache/cso_context.c
@@ -342,22 +342,20 @@ out:
}
/**
* Free the CSO context.
*/
void cso_destroy_context( struct cso_context *ctx )
{
unsigned i;
if (ctx->pipe) {
- ctx->pipe->set_index_buffer(ctx->pipe, NULL);
-
ctx->pipe->bind_blend_state( ctx->pipe, NULL );
ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );
{
static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
struct pipe_screen *scr = ctx->pipe->screen;
enum pipe_shader_type sh;
for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
int maxsam = scr->get_shader_param(scr, sh,
@@ -1690,34 +1688,20 @@ cso_restore_state(struct cso_context *cso)
cso_restore_fragment_image0(cso);
cso->saved_state = 0;
}
/* drawing */
void
-cso_set_index_buffer(struct cso_context *cso,
- const struct pipe_index_buffer *ib)
-{
- struct u_vbuf *vbuf = cso->vbuf;
-
- if (vbuf) {
- u_vbuf_set_index_buffer(vbuf, ib);
- } else {
- struct pipe_context *pipe = cso->pipe;
- pipe->set_index_buffer(pipe, ib);
- }
-}
-
-void
cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info)
{
struct u_vbuf *vbuf = cso->vbuf;
if (vbuf) {
u_vbuf_draw_vbo(vbuf, info);
} else {
struct pipe_context *pipe = cso->pipe;
pipe->draw_vbo(pipe, info);
diff --git a/src/gallium/auxiliary/cso_cache/cso_context.h b/src/gallium/auxiliary/cso_cache/cso_context.h
index 742bbb5..c21e838 100644
--- a/src/gallium/auxiliary/cso_cache/cso_context.h
+++ b/src/gallium/auxiliary/cso_cache/cso_context.h
@@ -215,24 +215,20 @@ void cso_set_constant_buffer_resource(struct cso_context *cso,
struct pipe_resource *buffer);
void cso_save_constant_buffer_slot0(struct cso_context *cso,
enum pipe_shader_type shader_stage);
void cso_restore_constant_buffer_slot0(struct cso_context *cso,
enum pipe_shader_type shader_stage);
/* drawing */
void
-cso_set_index_buffer(struct cso_context *cso,
- const struct pipe_index_buffer *ib);
-
-void
cso_draw_vbo(struct cso_context *cso,
const struct pipe_draw_info *info);
void
cso_draw_arrays_instanced(struct cso_context *cso, uint mode,
uint start, uint count,
uint start_instance, uint instance_count);
void
cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count);
diff --git a/src/gallium/auxiliary/draw/draw_context.c b/src/gallium/auxiliary/draw/draw_context.c
index 0eee075..9791ec5 100644
--- a/src/gallium/auxiliary/draw/draw_context.c
+++ b/src/gallium/auxiliary/draw/draw_context.c
@@ -770,23 +770,20 @@ draw_buffer(struct draw_context *draw,
void draw_set_render( struct draw_context *draw,
struct vbuf_render *render )
{
draw->render = render;
}
/**
* Tell the draw module where vertex indexes/elements are located, and
* their size (in bytes).
- *
- * Note: the caller must apply the pipe_index_buffer::offset value to
- * the address. The draw module doesn't do that.
*/
void
draw_set_indexes(struct draw_context *draw,
const void *elements, unsigned elem_size,
unsigned elem_buffer_space)
{
assert(elem_size == 0 ||
elem_size == 1 ||
elem_size == 2 ||
elem_size == 4);
diff --git a/src/gallium/auxiliary/draw/draw_pt.c b/src/gallium/auxiliary/draw/draw_pt.c
index 5a49acb..be76a30 100644
--- a/src/gallium/auxiliary/draw/draw_pt.c
+++ b/src/gallium/auxiliary/draw/draw_pt.c
@@ -436,21 +436,21 @@ resolve_draw_info(const struct pipe_draw_info *raw_info,
{
memcpy(info, raw_info, sizeof(struct pipe_draw_info));
if (raw_info->count_from_stream_output) {
struct draw_so_target *target =
(struct draw_so_target *)info->count_from_stream_output;
assert(vertex_buffer != NULL);
info->count = target->internal_offset / vertex_buffer->stride;
/* Stream output draw can not be indexed */
- debug_assert(!info->indexed);
+ debug_assert(!info->index_size);
info->max_index = info->count - 1;
}
}
/**
* Draw vertex arrays.
* This is the main entrypoint into the drawing module. If drawing an indexed
* primitive, the draw_set_indexes() function should have already been called
* to specify the element/index buffer information.
*/
@@ -466,29 +466,29 @@ draw_vbo(struct draw_context *draw,
/* Make sure that denorms are treated like zeros. This is
* the behavior required by D3D10. OpenGL doesn't care.
*/
util_fpstate_set_denorms_to_zero(fpstate);
resolve_draw_info(info, &resolved_info, &(draw->pt.vertex_buffer[0]));
info = &resolved_info;
assert(info->instance_count > 0);
- if (info->indexed)
+ if (info->index_size)
assert(draw->pt.user.elts);
count = info->count;
draw->pt.user.eltBias = info->index_bias;
draw->pt.user.min_index = info->min_index;
draw->pt.user.max_index = info->max_index;
- draw->pt.user.eltSize = info->indexed ? draw->pt.user.eltSizeIB : 0;
+ draw->pt.user.eltSize = info->index_size ? draw->pt.user.eltSizeIB : 0;
if (0)
debug_printf("draw_vbo(mode=%u start=%u count=%u):\n",
info->mode, info->start, count);
if (0)
tgsi_dump(draw->vs.vertex_shader->state.tokens, 0);
if (0) {
unsigned int i;
diff --git a/src/gallium/auxiliary/indices/u_primconvert.c b/src/gallium/auxiliary/indices/u_primconvert.c
index 1ffca4b..778f174 100644
--- a/src/gallium/auxiliary/indices/u_primconvert.c
+++ b/src/gallium/auxiliary/indices/u_primconvert.c
@@ -26,158 +26,136 @@
/**
* This module provides a more convenient front-end to the u_indices, etc.,
* utils to convert primitive types not supported by the
* hardware. It handles binding new index buffer state, and restoring
* previous state after. To use, put something like this at the front of
* drivers pipe->draw_vbo():
*
* // emulate unsupported primitives:
* if (info->mode needs emulating) {
- * util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
* util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
* util_primconvert_draw_vbo(ctx->primconvert, info);
* return;
* }
*
*/
#include "pipe/p_state.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "indices/u_indices.h"
#include "indices/u_primconvert.h"
struct primconvert_context
{
struct pipe_context *pipe;
- struct pipe_index_buffer saved_ib;
uint32_t primtypes_mask;
unsigned api_pv;
};
struct primconvert_context *
util_primconvert_create(struct pipe_context *pipe, uint32_t primtypes_mask)
{
struct primconvert_context *pc = CALLOC_STRUCT(primconvert_context);
if (!pc)
return NULL;
pc->pipe = pipe;
pc->primtypes_mask = primtypes_mask;
return pc;
}
void
util_primconvert_destroy(struct primconvert_context *pc)
{
- util_primconvert_save_index_buffer(pc, NULL);
FREE(pc);
}
void
-util_primconvert_save_index_buffer(struct primconvert_context *pc,
- const struct pipe_index_buffer *ib)
-{
- if (ib) {
- pipe_resource_reference(&pc->saved_ib.buffer, ib->buffer);
- pc->saved_ib.index_size = ib->index_size;
- pc->saved_ib.offset = ib->offset;
- pc->saved_ib.user_buffer = ib->user_buffer;
- }
- else {
- pipe_resource_reference(&pc->saved_ib.buffer, NULL);
- }
-}
-
-void
util_primconvert_save_rasterizer_state(struct primconvert_context *pc,
const struct pipe_rasterizer_state
*rast)
{
/* if we actually translated the provoking vertex for the buffer,
* we would actually need to save/restore rasterizer state. As
* it is, we just need to make note of the pv.
*/
pc->api_pv = rast->flatshade_first ? PV_FIRST : PV_LAST;
}
void
util_primconvert_draw_vbo(struct primconvert_context *pc,
const struct pipe_draw_info *info)
{
- struct pipe_index_buffer *ib = &pc->saved_ib;
- struct pipe_index_buffer new_ib;
struct pipe_draw_info new_info;
struct pipe_transfer *src_transfer = NULL;
u_translate_func trans_func;
u_generate_func gen_func;
const void *src = NULL;
void *dst;
+ unsigned ib_offset;
- memset(&new_ib, 0, sizeof(new_ib));
util_draw_init_info(&new_info);
- new_info.indexed = true;
new_info.min_index = info->min_index;
new_info.max_index = info->max_index;
new_info.index_bias = info->index_bias;
new_info.start_instance = info->start_instance;
new_info.instance_count = info->instance_count;
new_info.primitive_restart = info->primitive_restart;
new_info.restart_index = info->restart_index;
- if (info->indexed) {
+ if (info->index_size) {
enum pipe_prim_type mode = 0;
+ unsigned index_size;
u_index_translator(pc->primtypes_mask,
- info->mode, pc->saved_ib.index_size, info->count,
+ info->mode, info->index_size, info->count,
pc->api_pv, pc->api_pv,
info->primitive_restart ? PR_ENABLE : PR_DISABLE,
- &mode, &new_ib.index_size, &new_info.count,
+ &mode, &index_size, &new_info.count,
&trans_func);
new_info.mode = mode;
- src = ib->user_buffer;
+ new_info.index_size = index_size;
+ src = info->has_user_indices ? info->index.user : NULL;
if (!src) {
- src = pipe_buffer_map(pc->pipe, ib->buffer,
+ src = pipe_buffer_map(pc->pipe, info->index.resource,
PIPE_TRANSFER_READ, &src_transfer);
}
- src = (const uint8_t *)src + ib->offset;
+ src = (const uint8_t *)src;
}
else {
enum pipe_prim_type mode = 0;
+ unsigned index_size;
u_index_generator(pc->primtypes_mask,
info->mode, info->start, info->count,
pc->api_pv, pc->api_pv,
- &mode, &new_ib.index_size, &new_info.count,
+ &mode, &index_size, &new_info.count,
&gen_func);
new_info.mode = mode;
+ new_info.index_size = index_size;
}
- u_upload_alloc(pc->pipe->stream_uploader, 0, new_ib.index_size * new_info.count, 4,
- &new_ib.offset, &new_ib.buffer, &dst);
+ u_upload_alloc(pc->pipe->stream_uploader, 0, new_info.index_size * new_info.count, 4,
+ &ib_offset, &new_info.index.resource, &dst);
+ new_info.start = ib_offset / new_info.index_size;
- if (info->indexed) {
+ if (info->index_size) {
trans_func(src, info->start, info->count, new_info.count, info->restart_index, dst);
}
else {
gen_func(info->start, new_info.count, dst);
}
if (src_transfer)
pipe_buffer_unmap(pc->pipe, src_transfer);
u_upload_unmap(pc->pipe->stream_uploader);
- /* bind new index buffer: */
- pc->pipe->set_index_buffer(pc->pipe, &new_ib);
-
/* to the translated draw: */
pc->pipe->draw_vbo(pc->pipe, &new_info);
- /* and then restore saved ib: */
- pc->pipe->set_index_buffer(pc->pipe, ib);
-
- pipe_resource_reference(&new_ib.buffer, NULL);
+ pipe_resource_reference(&new_info.index.resource, NULL);
}
diff --git a/src/gallium/auxiliary/indices/u_primconvert.h b/src/gallium/auxiliary/indices/u_primconvert.h
index 73ffea0..02ee063 100644
--- a/src/gallium/auxiliary/indices/u_primconvert.h
+++ b/src/gallium/auxiliary/indices/u_primconvert.h
@@ -27,19 +27,17 @@
#ifndef U_PRIMCONVERT_H_
#define U_PRIMCONVERT_H_
#include "pipe/p_state.h"
struct primconvert_context;
struct primconvert_context *util_primconvert_create(struct pipe_context *pipe,
uint32_t primtypes_mask);
void util_primconvert_destroy(struct primconvert_context *pc);
-void util_primconvert_save_index_buffer(struct primconvert_context *pc,
- const struct pipe_index_buffer *ib);
void util_primconvert_save_rasterizer_state(struct primconvert_context *pc,
const struct pipe_rasterizer_state
*rast);
void util_primconvert_draw_vbo(struct primconvert_context *pc,
const struct pipe_draw_info *info);
#endif /* U_PRIMCONVERT_H_ */
diff --git a/src/gallium/auxiliary/util/u_draw.c b/src/gallium/auxiliary/util/u_draw.c
index e7abbfc..a7590f7 100644
--- a/src/gallium/auxiliary/util/u_draw.c
+++ b/src/gallium/auxiliary/util/u_draw.c
@@ -129,21 +129,21 @@ util_draw_max_index(
/* This extracts the draw arguments from the info_in->indirect resource,
* puts them into a new instance of pipe_draw_info, and calls draw_vbo on it.
*/
void
util_draw_indirect(struct pipe_context *pipe,
const struct pipe_draw_info *info_in)
{
struct pipe_draw_info info;
struct pipe_transfer *transfer;
uint32_t *params;
- const unsigned num_params = info_in->indexed ? 5 : 4;
+ const unsigned num_params = info_in->index_size ? 5 : 4;
assert(info_in->indirect);
assert(!info_in->count_from_stream_output);
memcpy(&info, info_in, sizeof(info));
params = (uint32_t *)
pipe_buffer_map_range(pipe,
info_in->indirect->buffer,
info_in->indirect->offset,
@@ -151,18 +151,18 @@ util_draw_indirect(struct pipe_context *pipe,
PIPE_TRANSFER_READ,
&transfer);
if (!transfer) {
debug_printf("%s: failed to map indirect buffer\n", __FUNCTION__);
return;
}
info.count = params[0];
info.instance_count = params[1];
info.start = params[2];
- info.index_bias = info_in->indexed ? params[3] : 0;
- info.start_instance = info_in->indexed ? params[4] : params[3];
+ info.index_bias = info_in->index_size ? params[3] : 0;
+ info.start_instance = info_in->index_size ? params[4] : params[3];
info.indirect = NULL;
pipe_buffer_unmap(pipe, transfer);
pipe->draw_vbo(pipe, &info);
}
diff --git a/src/gallium/auxiliary/util/u_draw.h b/src/gallium/auxiliary/util/u_draw.h
index b6ea3de..e8af140 100644
--- a/src/gallium/auxiliary/util/u_draw.h
+++ b/src/gallium/auxiliary/util/u_draw.h
@@ -60,29 +60,29 @@ util_draw_arrays(struct pipe_context *pipe,
info.mode = mode;
info.start = start;
info.count = count;
info.min_index = start;
info.max_index = start + count - 1;
pipe->draw_vbo(pipe, &info);
}
static inline void
-util_draw_elements(struct pipe_context *pipe, int index_bias,
- enum pipe_prim_type mode,
+util_draw_elements(struct pipe_context *pipe, unsigned index_size,
+ int index_bias, enum pipe_prim_type mode,
uint start,
uint count)
{
struct pipe_draw_info info;
util_draw_init_info(&info);
- info.indexed = TRUE;
+ info.index_size = index_size;
info.mode = mode;
info.start = start;
info.count = count;
info.index_bias = index_bias;
pipe->draw_vbo(pipe, &info);
}
static inline void
util_draw_arrays_instanced(struct pipe_context *pipe,
@@ -101,31 +101,32 @@ util_draw_arrays_instanced(struct pipe_context *pipe,
info.start_instance = start_instance;
info.instance_count = instance_count;
info.min_index = start;
info.max_index = start + count - 1;
pipe->draw_vbo(pipe, &info);
}
static inline void
util_draw_elements_instanced(struct pipe_context *pipe,
+ unsigned index_size,
int index_bias,
enum pipe_prim_type mode,
uint start,
uint count,
uint start_instance,
uint instance_count)
{
struct pipe_draw_info info;
util_draw_init_info(&info);
- info.indexed = TRUE;
+ info.index_size = index_size;
info.mode = mode;
info.start = start;
info.count = count;
info.index_bias = index_bias;
info.start_instance = start_instance;
info.instance_count = instance_count;
pipe->draw_vbo(pipe, &info);
}
diff --git a/src/gallium/auxiliary/util/u_dump.h b/src/gallium/auxiliary/util/u_dump.h
index bce8517..a446074 100644
--- a/src/gallium/auxiliary/util/u_dump.h
+++ b/src/gallium/auxiliary/util/u_dump.h
@@ -166,23 +166,20 @@ util_dump_sampler_view(FILE *stream, const struct pipe_sampler_view *state);
void
util_dump_transfer(FILE *stream,
const struct pipe_transfer *state);
void
util_dump_constant_buffer(FILE *stream,
const struct pipe_constant_buffer *state);
void
-util_dump_index_buffer(FILE *stream, const struct pipe_index_buffer *state);
-
-void
util_dump_vertex_buffer(FILE *stream,
const struct pipe_vertex_buffer *state);
void
util_dump_vertex_element(FILE *stream,
const struct pipe_vertex_element *state);
void
util_dump_stream_output_target(FILE *stream,
const struct pipe_stream_output_target *state);
diff --git a/src/gallium/auxiliary/util/u_dump_state.c b/src/gallium/auxiliary/util/u_dump_state.c
index 9c32557..4fd2830 100644
--- a/src/gallium/auxiliary/util/u_dump_state.c
+++ b/src/gallium/auxiliary/util/u_dump_state.c
@@ -825,39 +825,20 @@ util_dump_constant_buffer(FILE *stream,
util_dump_member(stream, ptr, state, buffer);
util_dump_member(stream, uint, state, buffer_offset);
util_dump_member(stream, uint, state, buffer_size);
util_dump_member(stream, ptr, state, user_buffer);
util_dump_struct_end(stream);
}
void
-util_dump_index_buffer(FILE *stream, const struct pipe_index_buffer *state)
-{
- if (!state) {
- util_dump_null(stream);
- return;
- }
-
- util_dump_struct_begin(stream, "pipe_index_buffer");
-
- util_dump_member(stream, uint, state, index_size);
- util_dump_member(stream, uint, state, offset);
- util_dump_member(stream, ptr, state, buffer);
- util_dump_member(stream, ptr, state, user_buffer);
-
- util_dump_struct_end(stream);
-}
-
-
-void
util_dump_vertex_buffer(FILE *stream, const struct pipe_vertex_buffer *state)
{
if (!state) {
util_dump_null(stream);
return;
}
util_dump_struct_begin(stream, "pipe_vertex_buffer");
util_dump_member(stream, uint, state, stride);
@@ -910,40 +891,42 @@ util_dump_stream_output_target(FILE *stream,
void
util_dump_draw_info(FILE *stream, const struct pipe_draw_info *state)
{
if (!state) {
util_dump_null(stream);
return;
}
util_dump_struct_begin(stream, "pipe_draw_info");
- util_dump_member(stream, bool, state, indexed);
+ util_dump_member(stream, uint, state, index_size);
+ util_dump_member(stream, uint, state, has_user_indices);
util_dump_member(stream, enum_prim_mode, state, mode);
util_dump_member(stream, uint, state, start);
util_dump_member(stream, uint, state, count);
util_dump_member(stream, uint, state, start_instance);
util_dump_member(stream, uint, state, instance_count);
util_dump_member(stream, uint, state, drawid);
util_dump_member(stream, uint, state, vertices_per_patch);
util_dump_member(stream, int, state, index_bias);
util_dump_member(stream, uint, state, min_index);
util_dump_member(stream, uint, state, max_index);
util_dump_member(stream, bool, state, primitive_restart);
util_dump_member(stream, uint, state, restart_index);
+ util_dump_member(stream, ptr, state, index.resource);
util_dump_member(stream, ptr, state, count_from_stream_output);
if (!state->indirect) {
util_dump_member(stream, ptr, state, indirect);
} else {
util_dump_member(stream, uint, state, indirect->offset);
util_dump_member(stream, uint, state, indirect->stride);
util_dump_member(stream, uint, state, indirect->draw_count);
util_dump_member(stream, uint, state, indirect->indirect_draw_count_offset);
util_dump_member(stream, ptr, state, indirect->buffer);
diff --git a/src/gallium/auxiliary/util/u_helpers.c b/src/gallium/auxiliary/util/u_helpers.c
index f91cb0c..e0feade 100644
--- a/src/gallium/auxiliary/util/u_helpers.c
+++ b/src/gallium/auxiliary/util/u_helpers.c
@@ -91,62 +91,38 @@ void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
if (dst[i].buffer.resource)
enabled_buffers |= (1ull << i);
}
util_set_vertex_buffers_mask(dst, &enabled_buffers, src, start_slot,
count);
*dst_count = util_last_bit(enabled_buffers);
}
-
-void
-util_set_index_buffer(struct pipe_index_buffer *dst,
- const struct pipe_index_buffer *src)
-{
- if (src) {
- pipe_resource_reference(&dst->buffer, src->buffer);
- memcpy(dst, src, sizeof(*dst));
- }
- else {
- pipe_resource_reference(&dst->buffer, NULL);
- memset(dst, 0, sizeof(*dst));
- }
-}
-
/**
* Given a user index buffer, save the structure to "saved", and upload it.
*/
bool
-util_save_and_upload_index_buffer(struct pipe_context *pipe,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib,
- struct pipe_index_buffer *out_saved)
+util_upload_index_buffer(struct pipe_context *pipe,
+ const struct pipe_draw_info *info,
+ struct pipe_resource **out_buffer,
+ unsigned *out_offset)
{
- struct pipe_index_buffer new_ib = {0};
- unsigned start_offset = info->start * ib->index_size;
+ unsigned start_offset = info->start * info->index_size;
u_upload_data(pipe->stream_uploader, start_offset,
- info->count * ib->index_size, 4,
- (char*)ib->user_buffer + start_offset,
- &new_ib.offset, &new_ib.buffer);
- if (!new_ib.buffer)
- return false;
+ info->count * info->index_size, 4,
+ (char*)info->index.user + start_offset,
+ out_offset, out_buffer);
u_upload_unmap(pipe->stream_uploader);
-
- new_ib.offset -= start_offset;
- new_ib.index_size = ib->index_size;
-
- util_set_index_buffer(out_saved, ib);
- pipe->set_index_buffer(pipe, &new_ib);
- pipe_resource_reference(&new_ib.buffer, NULL);
- return true;
+ *out_offset -= start_offset;
+ return *out_buffer != NULL;
}
struct pipe_query *
util_begin_pipestat_query(struct pipe_context *ctx)
{
struct pipe_query *q =
ctx->create_query(ctx, PIPE_QUERY_PIPELINE_STATISTICS, 0);
if (!q)
return NULL;
diff --git a/src/gallium/auxiliary/util/u_helpers.h b/src/gallium/auxiliary/util/u_helpers.h
index 2b382a1..ab970d7 100644
--- a/src/gallium/auxiliary/util/u_helpers.h
+++ b/src/gallium/auxiliary/util/u_helpers.h
@@ -38,27 +38,24 @@ extern "C" {
void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
uint32_t *enabled_buffers,
const struct pipe_vertex_buffer *src,
unsigned start_slot, unsigned count);
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
unsigned *dst_count,
const struct pipe_vertex_buffer *src,
unsigned start_slot, unsigned count);
-void util_set_index_buffer(struct pipe_index_buffer *dst,
- const struct pipe_index_buffer *src);
-
-bool util_save_and_upload_index_buffer(struct pipe_context *pipe,
- const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib,
- struct pipe_index_buffer *out_saved);
+bool util_upload_index_buffer(struct pipe_context *pipe,
+ const struct pipe_draw_info *info,
+ struct pipe_resource **out_buffer,
+ unsigned *out_offset);
struct pipe_query *
util_begin_pipestat_query(struct pipe_context *ctx);
void
util_end_pipestat_query(struct pipe_context *ctx, struct pipe_query *q,
FILE *f);
#ifdef __cplusplus
}
diff --git a/src/gallium/auxiliary/util/u_index_modify.c b/src/gallium/auxiliary/util/u_index_modify.c
index d86be24..4e9349a 100644
--- a/src/gallium/auxiliary/util/u_index_modify.c
+++ b/src/gallium/auxiliary/util/u_index_modify.c
@@ -20,104 +20,104 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "pipe/p_context.h"
#include "util/u_index_modify.h"
#include "util/u_inlines.h"
/* Ubyte indices. */
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start,
unsigned count,
void *out)
{
struct pipe_transfer *src_transfer = NULL;
const unsigned char *in_map;
unsigned short *out_map = out;
unsigned i;
- if (ib->user_buffer) {
- in_map = ib->user_buffer;
+ if (info->has_user_indices) {
+ in_map = info->index.user;
} else {
- in_map = pipe_buffer_map(context, ib->buffer,
+ in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
add_transfer_flags,
&src_transfer);
}
in_map += start;
for (i = 0; i < count; i++) {
*out_map = (unsigned short)(*in_map + index_bias);
in_map++;
out_map++;
}
if (src_transfer)
pipe_buffer_unmap(context, src_transfer);
}
/* Ushort indices. */
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
void *out)
{
struct pipe_transfer *in_transfer = NULL;
const unsigned short *in_map;
unsigned short *out_map = out;
unsigned i;
- if (ib->user_buffer) {
- in_map = ib->user_buffer;
+ if (info->has_user_indices) {
+ in_map = info->index.user;
} else {
- in_map = pipe_buffer_map(context, ib->buffer,
+ in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
add_transfer_flags,
&in_transfer);
}
in_map += start;
for (i = 0; i < count; i++) {
*out_map = (unsigned short)(*in_map + index_bias);
in_map++;
out_map++;
}
if (in_transfer)
pipe_buffer_unmap(context, in_transfer);
}
/* Uint indices. */
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
void *out)
{
struct pipe_transfer *in_transfer = NULL;
const unsigned int *in_map;
unsigned int *out_map = out;
unsigned i;
- if (ib->user_buffer) {
- in_map = ib->user_buffer;
+ if (info->has_user_indices) {
+ in_map = info->index.user;
} else {
- in_map = pipe_buffer_map(context, ib->buffer,
+ in_map = pipe_buffer_map(context, info->index.resource,
PIPE_TRANSFER_READ |
add_transfer_flags,
&in_transfer);
}
in_map += start;
for (i = 0; i < count; i++) {
*out_map = (unsigned int)(*in_map + index_bias);
in_map++;
out_map++;
diff --git a/src/gallium/auxiliary/util/u_index_modify.h b/src/gallium/auxiliary/util/u_index_modify.h
index d009199..ba96725 100644
--- a/src/gallium/auxiliary/util/u_index_modify.h
+++ b/src/gallium/auxiliary/util/u_index_modify.h
@@ -18,35 +18,34 @@
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef UTIL_INDEX_MODIFY_H
#define UTIL_INDEX_MODIFY_H
struct pipe_context;
struct pipe_resource;
-struct pipe_index_buffer;
void util_shorten_ubyte_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start,
unsigned count,
void *out);
void util_rebuild_ushort_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
void *out);
void util_rebuild_uint_elts_to_userptr(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
unsigned add_transfer_flags,
int index_bias,
unsigned start, unsigned count,
void *out);
#endif
diff --git a/src/gallium/auxiliary/util/u_prim_restart.c b/src/gallium/auxiliary/util/u_prim_restart.c
index e45aa56..b7675fa 100644
--- a/src/gallium/auxiliary/util/u_prim_restart.c
+++ b/src/gallium/auxiliary/util/u_prim_restart.c
@@ -32,84 +32,82 @@
/**
* Translate an index buffer for primitive restart.
* Create a new index buffer which is a copy of the original index buffer
* except that instances of 'restart_index' are converted to 0xffff or
* 0xffffffff.
* Also, index buffers using 1-byte indexes are converted to 2-byte indexes.
*/
enum pipe_error
util_translate_prim_restart_ib(struct pipe_context *context,
- struct pipe_index_buffer *src_buffer,
- struct pipe_resource **dst_buffer,
- unsigned num_indexes,
- unsigned restart_index)
+ const struct pipe_draw_info *info,
+ struct pipe_resource **dst_buffer)
{
struct pipe_screen *screen = context->screen;
struct pipe_transfer *src_transfer = NULL, *dst_transfer = NULL;
void *src_map = NULL, *dst_map = NULL;
- const unsigned src_index_size = src_buffer->index_size;
+ const unsigned src_index_size = info->index_size;
unsigned dst_index_size;
/* 1-byte indexes are converted to 2-byte indexes, 4-byte stays 4-byte */
- dst_index_size = MAX2(2, src_buffer->index_size);
+ dst_index_size = MAX2(2, info->index_size);
assert(dst_index_size == 2 || dst_index_size == 4);
/* no user buffers for now */
- assert(src_buffer->user_buffer == NULL);
+ assert(!info->has_user_indices);
/* Create new index buffer */
*dst_buffer = pipe_buffer_create(screen, PIPE_BIND_INDEX_BUFFER,
PIPE_USAGE_STREAM,
- num_indexes * dst_index_size);
+ info->count * dst_index_size);
if (!*dst_buffer)
goto error;
/* Map new / dest index buffer */
dst_map = pipe_buffer_map(context, *dst_buffer,
PIPE_TRANSFER_WRITE, &dst_transfer);
if (!dst_map)
goto error;
/* Map original / src index buffer */
- src_map = pipe_buffer_map_range(context, src_buffer->buffer,
- src_buffer->offset,
- num_indexes * src_index_size,
+ src_map = pipe_buffer_map_range(context, info->index.resource,
+ info->start * src_index_size,
+ info->count * src_index_size,
PIPE_TRANSFER_READ,
&src_transfer);
if (!src_map)
goto error;
if (src_index_size == 1 && dst_index_size == 2) {
uint8_t *src = (uint8_t *) src_map;
uint16_t *dst = (uint16_t *) dst_map;
unsigned i;
- for (i = 0; i < num_indexes; i++) {
- dst[i] = (src[i] == restart_index) ? 0xffff : src[i];
+ for (i = 0; i < info->count; i++) {
+ dst[i] = (src[i] == info->restart_index) ? 0xffff : src[i];
}
}
else if (src_index_size == 2 && dst_index_size == 2) {
uint16_t *src = (uint16_t *) src_map;
uint16_t *dst = (uint16_t *) dst_map;
unsigned i;
- for (i = 0; i < num_indexes; i++) {
- dst[i] = (src[i] == restart_index) ? 0xffff : src[i];
+ for (i = 0; i < info->count; i++) {
+ dst[i] = (src[i] == info->restart_index) ? 0xffff : src[i];
}
}
else {
uint32_t *src = (uint32_t *) src_map;
uint32_t *dst = (uint32_t *) dst_map;
unsigned i;
assert(src_index_size == 4);
assert(dst_index_size == 4);
- for (i = 0; i < num_indexes; i++) {
- dst[i] = (src[i] == restart_index) ? 0xffffffff : src[i];
+ for (i = 0; i < info->count; i++) {
+ dst[i] = (src[i] == info->restart_index) ? 0xffffffff : src[i];
}
}
pipe_buffer_unmap(context, src_transfer);
pipe_buffer_unmap(context, dst_transfer);
return PIPE_OK;
error:
if (src_transfer)
@@ -170,52 +168,50 @@ add_range(struct range_info *info, unsigned start, unsigned count)
/**
* Implement primitive restart by breaking an indexed primitive into
* pieces which do not contain restart indexes. Each piece is then
* drawn by calling pipe_context::draw_vbo().
* \return PIPE_OK if no error, an error code otherwise.
*/
enum pipe_error
util_draw_vbo_without_prim_restart(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
const struct pipe_draw_info *info)
{
const void *src_map;
struct range_info ranges = {0};
struct pipe_draw_info new_info;
struct pipe_transfer *src_transfer = NULL;
unsigned i, start, count;
- assert(info->indexed);
+ assert(info->index_size);
assert(info->primitive_restart);
/* Get pointer to the index data */
- if (ib->buffer) {
+ if (!info->has_user_indices) {
/* map the index buffer (only the range we need to scan) */
- src_map = pipe_buffer_map_range(context, ib->buffer,
- ib->offset + info->start * ib->index_size,
- info->count * ib->index_size,
+ src_map = pipe_buffer_map_range(context, info->index.resource,
+ info->start * info->index_size,
+ info->count * info->index_size,
PIPE_TRANSFER_READ,
&src_transfer);
if (!src_map) {
return PIPE_ERROR_OUT_OF_MEMORY;
}
}
else {
- if (!ib->user_buffer) {
+ if (!info->index.user) {
debug_printf("User-space index buffer is null!");
return PIPE_ERROR_BAD_INPUT;
}
- src_map = (const uint8_t *) ib->user_buffer
- + ib->offset
- + info->start * ib->index_size;
+ src_map = (const uint8_t *) info->index.user
+ + info->start * info->index_size;
}
#define SCAN_INDEXES(TYPE) \
for (i = 0; i <= info->count; i++) { \
if (i == info->count || \
((const TYPE *) src_map)[i] == info->restart_index) { \
/* cut / restart */ \
if (count > 0) { \
if (!add_range(&ranges, info->start + start, count)) { \
if (src_transfer) \
@@ -226,21 +222,21 @@ util_draw_vbo_without_prim_restart(struct pipe_context *context,
start = i + 1; \
count = 0; \
} \
else { \
count++; \
} \
}
start = info->start;
count = 0;
- switch (ib->index_size) {
+ switch (info->index_size) {
case 1:
SCAN_INDEXES(uint8_t);
break;
case 2:
SCAN_INDEXES(uint16_t);
break;
case 4:
SCAN_INDEXES(uint32_t);
break;
default:
diff --git a/src/gallium/auxiliary/util/u_prim_restart.h b/src/gallium/auxiliary/util/u_prim_restart.h
index 1e98e0e..0e17ce5 100644
--- a/src/gallium/auxiliary/util/u_prim_restart.h
+++ b/src/gallium/auxiliary/util/u_prim_restart.h
@@ -31,32 +31,29 @@
#include "pipe/p_defines.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pipe_context;
struct pipe_draw_info;
-struct pipe_index_buffer;
+union pipe_index_binding;
struct pipe_resource;
enum pipe_error
util_translate_prim_restart_ib(struct pipe_context *context,
- struct pipe_index_buffer *src_buffer,
- struct pipe_resource **dst_buffer,
- unsigned num_indexes,
- unsigned restart_index);
+ const struct pipe_draw_info *info,
+ struct pipe_resource **dst_buffer);
enum pipe_error
util_draw_vbo_without_prim_restart(struct pipe_context *context,
- const struct pipe_index_buffer *ib,
const struct pipe_draw_info *info);
#ifdef __cplusplus
}
#endif
#endif
diff --git a/src/gallium/auxiliary/util/u_vbuf.c b/src/gallium/auxiliary/util/u_vbuf.c
index 9d6d529..8361e64 100644
--- a/src/gallium/auxiliary/util/u_vbuf.c
+++ b/src/gallium/auxiliary/util/u_vbuf.c
@@ -155,23 +155,20 @@ struct u_vbuf {
/* Saved vertex buffer. */
unsigned aux_vertex_buffer_slot;
struct pipe_vertex_buffer aux_vertex_buffer_saved;
/* Vertex buffers for the driver.
* There are usually no user buffers. */
struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last
call of set_vertex_buffers */
- /* The index buffer. */
- struct pipe_index_buffer index_buffer;
-
/* Vertex elements. */
struct u_vbuf_elements *ve, *ve_saved;
/* Vertex elements used for the translate fallback. */
struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
/* If non-NULL, this is a vertex element state used for the translate
* fallback and therefore used for rendering too. */
boolean using_translate;
/* The vertex buffer slot index where translated vertices have been
* stored in. */
@@ -365,43 +362,40 @@ void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states);
}
void u_vbuf_destroy(struct u_vbuf *mgr)
{
struct pipe_screen *screen = mgr->pipe->screen;
unsigned i;
unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
PIPE_SHADER_CAP_MAX_INPUTS);
- mgr->pipe->set_index_buffer(mgr->pipe, NULL);
- pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
-
mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);
pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved);
translate_cache_destroy(mgr->translate_cache);
cso_cache_delete(mgr->cso_cache);
FREE(mgr);
}
static enum pipe_error
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
+ const struct pipe_draw_info *info,
unsigned vb_mask, unsigned out_vb,
int start_vertex, unsigned num_vertices,
- int start_index, unsigned num_indices, int min_index,
- boolean unroll_indices)
+ int min_index, boolean unroll_indices)
{
struct translate *tr;
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
struct pipe_resource *out_buffer = NULL;
uint8_t *out_map;
unsigned out_offset, mask;
/* Get a translate object. */
tr = translate_cache_find(mgr->translate_cache, key);
@@ -433,52 +427,49 @@ u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
/* Subtract min_index so that indexing with the index buffer works. */
if (unroll_indices) {
map -= (ptrdiff_t)vb->stride * min_index;
}
tr->set_buffer(tr, i, map, vb->stride, ~0);
}
/* Translate. */
if (unroll_indices) {
- struct pipe_index_buffer *ib = &mgr->index_buffer;
struct pipe_transfer *transfer = NULL;
- unsigned offset = ib->offset + start_index * ib->index_size;
+ unsigned offset = info->start * info->index_size;
uint8_t *map;
- assert((ib->buffer || ib->user_buffer) && ib->index_size);
-
/* Create and map the output buffer. */
u_upload_alloc(mgr->pipe->stream_uploader, 0,
- key->output_stride * num_indices, 4,
+ key->output_stride * info->count, 4,
&out_offset, &out_buffer,
(void**)&out_map);
if (!out_buffer)
return PIPE_ERROR_OUT_OF_MEMORY;
- if (ib->user_buffer) {
- map = (uint8_t*)ib->user_buffer + offset;
+ if (info->has_user_indices) {
+ map = (uint8_t*)info->index.user + offset;
} else {
- map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
- num_indices * ib->index_size,
+ map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
+ info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer);
}
- switch (ib->index_size) {
+ switch (info->index_size) {
case 4:
- tr->run_elts(tr, (unsigned*)map, num_indices, 0, 0, out_map);
+ tr->run_elts(tr, (unsigned*)map, info->count, 0, 0, out_map);
break;
case 2:
- tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, 0, out_map);
+ tr->run_elts16(tr, (uint16_t*)map, info->count, 0, 0, out_map);
break;
case 1:
- tr->run_elts8(tr, map, num_indices, 0, 0, out_map);
+ tr->run_elts8(tr, map, info->count, 0, 0, out_map);
break;
}
if (transfer) {
pipe_buffer_unmap(mgr->pipe, transfer);
}
} else {
/* Create and map the output buffer. */
u_upload_alloc(mgr->pipe->stream_uploader,
key->output_stride * start_vertex,
@@ -549,42 +540,41 @@ u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
mgr->dirty_real_vb_mask |= 1 << fallback_vbs[type];
}
}
memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
return TRUE;
}
static boolean
u_vbuf_translate_begin(struct u_vbuf *mgr,
+ const struct pipe_draw_info *info,
int start_vertex, unsigned num_vertices,
- int start_instance, unsigned num_instances,
- int start_index, unsigned num_indices, int min_index,
- boolean unroll_indices)
+ int min_index, boolean unroll_indices)
{
unsigned mask[VB_NUM] = {0};
struct translate_key key[VB_NUM];
unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */
unsigned i, type;
unsigned incompatible_vb_mask = mgr->incompatible_vb_mask &
mgr->ve->used_vb_mask;
int start[VB_NUM] = {
- start_vertex, /* VERTEX */
- start_instance, /* INSTANCE */
- 0 /* CONST */
+ start_vertex, /* VERTEX */
+ info->start_instance, /* INSTANCE */
+ 0 /* CONST */
};
unsigned num[VB_NUM] = {
- num_vertices, /* VERTEX */
- num_instances, /* INSTANCE */
- 1 /* CONST */
+ num_vertices, /* VERTEX */
+ info->instance_count, /* INSTANCE */
+ 1 /* CONST */
};
memset(key, 0, sizeof(key));
memset(elem_index, ~0, sizeof(elem_index));
/* See if there are vertex attribs of each type to translate and
* which ones. */
for (i = 0; i < mgr->ve->count; i++) {
unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;
@@ -657,24 +647,23 @@ u_vbuf_translate_begin(struct u_vbuf *mgr,
te->output_offset = k->output_stride;
k->output_stride += mgr->ve->native_format_size[i];
k->nr_elements++;
}
/* Translate buffers. */
for (type = 0; type < VB_NUM; type++) {
if (key[type].nr_elements) {
enum pipe_error err;
- err = u_vbuf_translate_buffers(mgr, &key[type], mask[type],
+ err = u_vbuf_translate_buffers(mgr, &key[type], info, mask[type],
mgr->fallback_vbs[type],
- start[type], num[type],
- start_index, num_indices, min_index,
+ start[type], num[type], min_index,
unroll_indices && type == VB_VERTEX);
if (err != PIPE_OK)
return FALSE;
/* Fixup the stride for constant attribs. */
if (type == VB_CONST) {
mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
}
}
}
@@ -882,36 +871,20 @@ void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
mgr->user_vb_mask |= user_vb_mask;
mgr->incompatible_vb_mask |= incompatible_vb_mask;
mgr->nonzero_stride_vb_mask |= nonzero_stride_vb_mask;
mgr->enabled_vb_mask |= enabled_vb_mask;
/* All changed buffers are marked as dirty, even the NULL ones,
* which will cause the NULL buffers to be unbound in the driver later. */
mgr->dirty_real_vb_mask |= ~mask;
}
-void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
- const struct pipe_index_buffer *ib)
-{
- struct pipe_context *pipe = mgr->pipe;
-
- if (ib) {
- assert(ib->offset % ib->index_size == 0);
- pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
- memcpy(&mgr->index_buffer, ib, sizeof(*ib));
- } else {
- pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
- }
-
- pipe->set_index_buffer(pipe, ib);
-}
-
static enum pipe_error
u_vbuf_upload_buffers(struct u_vbuf *mgr,
int start_vertex, unsigned num_vertices,
int start_instance, unsigned num_instances)
{
unsigned i;
unsigned nr_velems = mgr->ve->count;
struct pipe_vertex_element *velems =
mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve;
unsigned start_offset[PIPE_MAX_ATTRIBS];
@@ -1016,100 +989,96 @@ static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr)
* be way more costly than this. */
return (mgr->ve->used_vb_mask &
(~mgr->user_vb_mask &
~mgr->incompatible_vb_mask &
mgr->ve->compatible_vb_mask_all &
mgr->ve->noninstance_vb_mask_any &
mgr->nonzero_stride_vb_mask)) != 0;
}
static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
- struct pipe_index_buffer *ib,
- boolean primitive_restart,
- unsigned restart_index,
- unsigned start, unsigned count,
- int *out_min_index,
- int *out_max_index)
+ const struct pipe_draw_info *info,
+ int *out_min_index, int *out_max_index)
{
struct pipe_transfer *transfer = NULL;
const void *indices;
unsigned i;
- if (ib->user_buffer) {
- indices = (uint8_t*)ib->user_buffer +
- ib->offset + start * ib->index_size;
+ if (info->has_user_indices) {
+ indices = (uint8_t*)info->index.user +
+ info->start * info->index_size;
} else {
- indices = pipe_buffer_map_range(pipe, ib->buffer,
- ib->offset + start * ib->index_size,
- count * ib->index_size,
+ indices = pipe_buffer_map_range(pipe, info->index.resource,
+ info->start * info->index_size,
+ info->count * info->index_size,
PIPE_TRANSFER_READ, &transfer);
}
- switch (ib->index_size) {
+ switch (info->index_size) {
case 4: {
const unsigned *ui_indices = (const unsigned*)indices;
unsigned max_ui = 0;
unsigned min_ui = ~0U;
- if (primitive_restart) {
- for (i = 0; i < count; i++) {
- if (ui_indices[i] != restart_index) {
+ if (info->primitive_restart) {
+ for (i = 0; i < info->count; i++) {
+ if (ui_indices[i] != info->restart_index) {
if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
}
}
}
else {
- for (i = 0; i < count; i++) {
+ for (i = 0; i < info->count; i++) {
if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
}
}
*out_min_index = min_ui;
*out_max_index = max_ui;
break;
}
case 2: {
const unsigned short *us_indices = (const unsigned short*)indices;
unsigned max_us = 0;
unsigned min_us = ~0U;
- if (primitive_restart) {
- for (i = 0; i < count; i++) {
- if (us_indices[i] != restart_index) {
+ if (info->primitive_restart) {
+ for (i = 0; i < info->count; i++) {
+ if (us_indices[i] != info->restart_index) {
if (us_indices[i] > max_us) max_us = us_indices[i];
if (us_indices[i] < min_us) min_us = us_indices[i];
}
}
}
else {
- for (i = 0; i < count; i++) {
+ for (i = 0; i < info->count; i++) {
if (us_indices[i] > max_us) max_us = us_indices[i];
if (us_indices[i] < min_us) min_us = us_indices[i];
}
}
*out_min_index = min_us;
*out_max_index = max_us;
break;
}
case 1: {
const unsigned char *ub_indices = (const unsigned char*)indices;
unsigned max_ub = 0;
unsigned min_ub = ~0U;
- if (primitive_restart) {
- for (i = 0; i < count; i++) {
- if (ub_indices[i] != restart_index) {
+ if (info->primitive_restart) {
+ for (i = 0; i < info->count; i++) {
+ if (ub_indices[i] != info->restart_index) {
if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
}
}
}
else {
- for (i = 0; i < count; i++) {
+ for (i = 0; i < info->count; i++) {
if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
}
}
*out_min_index = min_ub;
*out_max_index = max_ub;
break;
}
default:
assert(0);
@@ -1160,54 +1129,52 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
return;
}
new_info = *info;
/* Fallback. We need to know all the parameters. */
if (new_info.indirect) {
struct pipe_transfer *transfer = NULL;
int *data;
- if (new_info.indexed) {
+ if (new_info.index_size) {
data = pipe_buffer_map_range(pipe, new_info.indirect->buffer,
new_info.indirect->offset, 20,
PIPE_TRANSFER_READ, &transfer);
new_info.index_bias = data[3];
new_info.start_instance = data[4];
}
else {
data = pipe_buffer_map_range(pipe, new_info.indirect->buffer,
new_info.indirect->offset, 16,
PIPE_TRANSFER_READ, &transfer);
new_info.start_instance = data[3];
}
new_info.count = data[0];
new_info.instance_count = data[1];
new_info.start = data[2];
pipe_buffer_unmap(pipe, transfer);
new_info.indirect = NULL;
}
- if (new_info.indexed) {
+ if (new_info.index_size) {
/* See if anything needs to be done for per-vertex attribs. */
if (u_vbuf_need_minmax_index(mgr)) {
int max_index;
if (new_info.max_index != ~0u) {
min_index = new_info.min_index;
max_index = new_info.max_index;
} else {
- u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer,
- new_info.primitive_restart,
- new_info.restart_index, new_info.start,
- new_info.count, &min_index, &max_index);
+ u_vbuf_get_minmax_index(mgr->pipe, &new_info,
+ &min_index, &max_index);
}
assert(min_index <= max_index);
start_vertex = min_index + new_info.index_bias;
num_vertices = max_index + 1 - min_index;
/* Primitive restart doesn't work when unrolling indices.
* We would have to break this drawing operation into several ones. */
/* Use some heuristic to see if unrolling indices improves
@@ -1229,30 +1196,28 @@ void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
} else {
start_vertex = new_info.start;
num_vertices = new_info.count;
min_index = 0;
}
/* Translate vertices with non-native layouts or formats. */
if (unroll_indices ||
incompatible_vb_mask ||
mgr->ve->incompatible_elem_mask) {
- if (!u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
- new_info.start_instance,
- new_info.instance_count, new_info.start,
- new_info.count, min_index, unroll_indices)) {
+ if (!u_vbuf_translate_begin(mgr, &new_info, start_vertex, num_vertices,
+ min_index, unroll_indices)) {
debug_warn_once("u_vbuf_translate_begin() failed");
return;
}
if (unroll_indices) {
- new_info.indexed = FALSE;
+ new_info.index_size = 0;
new_info.index_bias = 0;
new_info.min_index = 0;
new_info.max_index = new_info.count - 1;
new_info.start = 0;
}
user_vb_mask &= ~(incompatible_vb_mask |
mgr->ve->incompatible_vb_mask_all);
}
diff --git a/src/gallium/auxiliary/util/u_vbuf.h b/src/gallium/auxiliary/util/u_vbuf.h
index ddfa844..d070452 100644
--- a/src/gallium/auxiliary/util/u_vbuf.h
+++ b/src/gallium/auxiliary/util/u_vbuf.h
@@ -65,21 +65,19 @@ u_vbuf_create(struct pipe_context *pipe,
struct u_vbuf_caps *caps, unsigned aux_vertex_buffer_index);
void u_vbuf_destroy(struct u_vbuf *mgr);
/* State and draw functions. */
void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
const struct pipe_vertex_element *states);
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *bufs);
-void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
- const struct pipe_index_buffer *ib);
void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info);
/* Save/restore functionality. */
void u_vbuf_save_vertex_elements(struct u_vbuf *mgr);
void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr);
void u_vbuf_save_aux_vertex_buffer_slot(struct u_vbuf *mgr);
void u_vbuf_restore_aux_vertex_buffer_slot(struct u_vbuf *mgr);
#endif
diff --git a/src/gallium/docs/source/context.rst b/src/gallium/docs/source/context.rst
index 5949ff2..9bcc0e6 100644
--- a/src/gallium/docs/source/context.rst
+++ b/src/gallium/docs/source/context.rst
@@ -46,22 +46,20 @@ buffers, surfaces) are bound to the driver.
* ``set_constant_buffer`` sets a constant buffer to be used for a given shader
type. index is used to indicate which buffer to set (some apis may allow
multiple ones to be set, and binding a specific one later, though drivers
are mostly restricted to the first one right now).
* ``set_framebuffer_state``
* ``set_vertex_buffers``
-* ``set_index_buffer``
-
Non-CSO State
^^^^^^^^^^^^^
These pieces of state are too small, variable, and/or trivial to have CSO
objects. They all follow simple, one-method binding calls, e.g.
``set_blend_color``.
* ``set_stencil_ref`` sets the stencil front and back reference values
which are used as comparison values in stencil test.
@@ -283,22 +281,22 @@ Drawing
``draw_vbo`` draws a specified primitive. The primitive mode and other
properties are described by ``pipe_draw_info``.
The ``mode``, ``start``, and ``count`` fields of ``pipe_draw_info`` specify the
mode of the primitive and the vertices to be fetched, in the range from
``start`` to ``start``+``count``-1, inclusive.
Every instance with instanceID in the range between ``start_instance`` and
``start_instance``+``instance_count``-1, inclusive, will be drawn.
-If there is an index buffer bound, and ``indexed`` field is true, all vertex
-indices will be looked up in the index buffer.
+If ``index_size`` != 0, all vertex indices will be looked up from the index
+buffer.
In an indexed draw, ``min_index`` and ``max_index`` respectively provide a lower
and upper bound of the indices contained in the index buffer inside the range
from ``start`` to ``start``+``count``-1. This allows the driver to
determine which subset of vertices will be referenced during the draw call
without having to scan the index buffer. Providing an over-estimation of
the true bounds, for example, a ``min_index`` and ``max_index`` of 0 and
0xffffffff respectively, must give exactly the same rendering, albeit with lower
performance due to unreferenced vertex buffers being unnecessarily DMA'ed or
processed. Providing an underestimation of the true bounds will result in
diff --git a/src/gallium/drivers/ddebug/dd_context.c b/src/gallium/drivers/ddebug/dd_context.c
index 723e90e..24513f1 100644
--- a/src/gallium/drivers/ddebug/dd_context.c
+++ b/src/gallium/drivers/ddebug/dd_context.c
@@ -569,31 +569,20 @@ dd_context_set_vertex_buffers(struct pipe_context *_pipe,
{
struct dd_context *dctx = dd_context(_pipe);
struct pipe_context *pipe = dctx->pipe;
safe_memcpy(&dctx->draw_state.vertex_buffers[start], buffers,
sizeof(buffers[0]) * num_buffers);
pipe->set_vertex_buffers(pipe, start, num_buffers, buffers);
}
static void
-dd_context_set_index_buffer(struct pipe_context *_pipe,
- const struct pipe_index_buffer *ib)
-{
- struct dd_context *dctx = dd_context(_pipe);
- struct pipe_context *pipe = dctx->pipe;
-
- safe_memcpy(&dctx->draw_state.index_buffer, ib, sizeof(*ib));
- pipe->set_index_buffer(pipe, ib);
-}
-
-static void
dd_context_set_stream_output_targets(struct pipe_context *_pipe,
unsigned num_targets,
struct pipe_stream_output_target **tgs,
const unsigned *offsets)
{
struct dd_context *dctx = dd_context(_pipe);
struct pipe_context *pipe = dctx->pipe;
struct dd_draw_state *dstate = &dctx->draw_state;
dstate->num_so_targets = num_targets;
@@ -843,21 +832,20 @@ dd_context_create(struct dd_screen *dscreen, struct pipe_context *pipe)
CTX_INIT(set_constant_buffer);
CTX_INIT(set_framebuffer_state);
CTX_INIT(set_polygon_stipple);
CTX_INIT(set_scissor_states);
CTX_INIT(set_viewport_states);
CTX_INIT(set_sampler_views);
CTX_INIT(set_tess_state);
CTX_INIT(set_shader_buffers);
CTX_INIT(set_shader_images);
CTX_INIT(set_vertex_buffers);
- CTX_INIT(set_index_buffer);
CTX_INIT(create_stream_output_target);
CTX_INIT(stream_output_target_destroy);
CTX_INIT(set_stream_output_targets);
CTX_INIT(create_sampler_view);
CTX_INIT(sampler_view_destroy);
CTX_INIT(create_surface);
CTX_INIT(surface_destroy);
CTX_INIT(transfer_map);
CTX_INIT(transfer_flush_region);
CTX_INIT(transfer_unmap);
diff --git a/src/gallium/drivers/ddebug/dd_draw.c b/src/gallium/drivers/ddebug/dd_draw.c
index 7ffbb44..43535e2 100644
--- a/src/gallium/drivers/ddebug/dd_draw.c
+++ b/src/gallium/drivers/ddebug/dd_draw.c
@@ -203,25 +203,20 @@ dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE
const char *shader_str[PIPE_SHADER_TYPES];
shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";
DUMP(draw_info, info);
- if (info->indexed) {
- DUMP(index_buffer, &dstate->index_buffer);
- if (dstate->index_buffer.buffer)
- DUMP_M(resource, &dstate->index_buffer, buffer);
- }
if (info->count_from_stream_output)
DUMP_M(stream_output_target, info,
count_from_stream_output);
if (info->indirect) {
DUMP_M(resource, info, indirect->buffer);
if (info->indirect->indirect_draw_count)
DUMP_M(resource, info, indirect->indirect_draw_count);
}
fprintf(f, "\n");
@@ -605,20 +600,25 @@ dd_flush_and_handle_hang(struct dd_context *dctx,
}
static void
dd_unreference_copy_of_call(struct dd_call *dst)
{
switch (dst->type) {
case CALL_DRAW_VBO:
pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output, NULL);
pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
+ if (dst->info.draw_vbo.draw.index_size &&
+ !dst->info.draw_vbo.draw.has_user_indices)
+ pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
+ else
+ dst->info.draw_vbo.draw.index.user = NULL;
break;
case CALL_LAUNCH_GRID:
pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
break;
case CALL_RESOURCE_COPY_REGION:
pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
break;
case CALL_BLIT:
pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
@@ -650,20 +650,33 @@ dd_copy_call(struct dd_call *dst, struct dd_call *src)
dst->type = src->type;
switch (src->type) {
case CALL_DRAW_VBO:
pipe_so_target_reference(&dst->info.draw_vbo.draw.count_from_stream_output,
src->info.draw_vbo.draw.count_from_stream_output);
pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer,
src->info.draw_vbo.indirect.buffer);
pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count,
src->info.draw_vbo.indirect.indirect_draw_count);
+
+ if (dst->info.draw_vbo.draw.index_size &&
+ !dst->info.draw_vbo.draw.has_user_indices)
+ pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource, NULL);
+ else
+ dst->info.draw_vbo.draw.index.user = NULL;
+
+ if (src->info.draw_vbo.draw.index_size &&
+ !src->info.draw_vbo.draw.has_user_indices) {
+ pipe_resource_reference(&dst->info.draw_vbo.draw.index.resource,
+ src->info.draw_vbo.draw.index.resource);
+ }
+
dst->info.draw_vbo = src->info.draw_vbo;
if (!src->info.draw_vbo.draw.indirect)
dst->info.draw_vbo.draw.indirect = NULL;
else
dst->info.draw_vbo.draw.indirect = &dst->info.draw_vbo.indirect;
break;
case CALL_LAUNCH_GRID:
pipe_resource_reference(&dst->info.launch_grid.indirect,
src->info.launch_grid.indirect);
dst->info.launch_grid = src->info.launch_grid;
@@ -709,22 +722,20 @@ dd_copy_call(struct dd_call *dst, struct dd_call *src)
}
static void
dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
{
unsigned i,j;
/* Just clear pointers to gallium objects. Don't clear the whole structure,
* because it would kill performance with its size of 130 KB.
*/
- memset(&state->base.index_buffer, 0,
- sizeof(state->base.index_buffer));
memset(state->base.vertex_buffers, 0,
sizeof(state->base.vertex_buffers));
memset(state->base.so_targets, 0,
sizeof(state->base.so_targets));
memset(state->base.constant_buffers, 0,
sizeof(state->base.constant_buffers));
memset(state->base.sampler_views, 0,
sizeof(state->base.sampler_views));
memset(state->base.shader_images, 0,
sizeof(state->base.shader_images));
@@ -748,22 +759,20 @@ dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
state->base.dsa = &state->dsa;
state->base.blend = &state->blend;
}
static void
dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
{
struct dd_draw_state *dst = &state->base;
unsigned i,j;
- util_set_index_buffer(&dst->index_buffer, NULL);
-
for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
pipe_so_target_reference(&dst->so_targets[i], NULL);
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
if (dst->shaders[i])
tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);
for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
@@ -785,22 +794,20 @@ dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
unsigned i,j;
if (src->render_cond.query) {
*dst->render_cond.query = *src->render_cond.query;
dst->render_cond.condition = src->render_cond.condition;
dst->render_cond.mode = src->render_cond.mode;
} else {
dst->render_cond.query = NULL;
}
- util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
-
for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
&src->vertex_buffers[i]);
}
dst->num_so_targets = src->num_so_targets;
for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
diff --git a/src/gallium/drivers/ddebug/dd_pipe.h b/src/gallium/drivers/ddebug/dd_pipe.h
index ea33193..caad45b 100644
--- a/src/gallium/drivers/ddebug/dd_pipe.h
+++ b/src/gallium/drivers/ddebug/dd_pipe.h
@@ -149,21 +149,20 @@ struct dd_state
};
struct dd_draw_state
{
struct {
struct dd_query *query;
bool condition;
unsigned mode;
} render_cond;
- struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffers[PIPE_MAX_ATTRIBS];
unsigned num_so_targets;
struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];
unsigned so_offsets[PIPE_MAX_SO_BUFFERS];
struct dd_state *shaders[PIPE_SHADER_TYPES];
struct pipe_constant_buffer constant_buffers[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
struct pipe_sampler_view *sampler_views[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
struct dd_state *sampler_states[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
diff --git a/src/gallium/drivers/etnaviv/etnaviv_context.c b/src/gallium/drivers/etnaviv/etnaviv_context.c
index 2e5e7f6..306cb6f 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_context.c
+++ b/src/gallium/drivers/etnaviv/etnaviv_context.c
@@ -85,21 +85,21 @@ etna_update_state_for_draw(struct etna_context *ctx, const struct pipe_draw_info
{
/* Handle primitive restart:
* - If not an indexed draw, we don't care about the state of the primitive restart bit.
* - Otherwise, set the bit in INDEX_STREAM_CONTROL in the index buffer state
* accordingly
* - If the value of the INDEX_STREAM_CONTROL register changed due to this, or
* primitive restart is enabled and the restart index changed, mark the index
* buffer state as dirty
*/
- if (info->indexed) {
+ if (info->index_size) {
uint32_t new_control = ctx->index_buffer.FE_INDEX_STREAM_CONTROL;
if (info->primitive_restart)
new_control |= VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;
else
new_control &= ~VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;
if (ctx->index_buffer.FE_INDEX_STREAM_CONTROL != new_control ||
(info->primitive_restart && ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX != info->restart_index)) {
ctx->index_buffer.FE_INDEX_STREAM_CONTROL = new_control;
@@ -152,48 +152,55 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
if (!info->count_from_stream_output && !info->indirect &&
!info->primitive_restart &&
!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
return;
if (ctx->vertex_elements == NULL || ctx->vertex_elements->num_elements == 0)
return; /* Nothing to do */
if (!(ctx->prim_hwsupport & (1 << info->mode))) {
struct primconvert_context *primconvert = ctx->primconvert;
- util_primconvert_save_index_buffer(primconvert, &ctx->index_buffer.ib);
util_primconvert_save_rasterizer_state(primconvert, ctx->rasterizer);
util_primconvert_draw_vbo(primconvert, info);
return;
}
int prims = u_decomposed_prims_for_vertices(info->mode, info->count);
if (unlikely(prims <= 0)) {
DBG("Invalid draw primitive mode=%i or no primitives to be drawn", info->mode);
return;
}
draw_mode = translate_draw_mode(info->mode);
if (draw_mode == ETNA_NO_MATCH) {
BUG("Unsupported draw mode");
return;
}
/* Upload a user index buffer. */
- struct pipe_index_buffer ibuffer_saved = {};
- if (info->indexed && ctx->index_buffer.ib.user_buffer &&
- !util_save_and_upload_index_buffer(pctx, info, &ctx->index_buffer.ib,
- &ibuffer_saved)) {
+ unsigned index_offset = 0;
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
+ if (info->index_size && info->has_user_indices &&
+ !util_upload_index_buffer(pctx, info, &indexbuf, &index_offset)) {
BUG("Index buffer upload failed.");
return;
}
- if (info->indexed && !ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
+ if (info->index_size && indexbuf) {
+ ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(indexbuf)->bo;
+ ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = index_offset;
+ ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
+ ctx->index_buffer.FE_INDEX_STREAM_CONTROL = translate_index_size(info->index_size);
+ ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
+ }
+
+ if (info->index_size && !ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
BUG("Unsupported or no index buffer");
return;
}
struct etna_shader_key key = {};
struct etna_surface *cbuf = etna_surface(pfb->cbufs[0]);
if (cbuf) {
struct etna_resource *res = etna_resource(cbuf->base.texture);
@@ -232,57 +239,57 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_VERTEX].buffer);
resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].buffer);
/* Mark VBOs as being read */
for (i = 0; i < ctx->vertex_buffer.count; i++) {
assert(!ctx->vertex_buffer.vb[i].is_user_buffer);
resource_read(ctx, ctx->vertex_buffer.vb[i].buffer.resource);
}
/* Mark index buffer as being read */
- resource_read(ctx, ctx->index_buffer.ib.buffer);
+ resource_read(ctx, indexbuf);
/* Mark textures as being read */
for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
if (ctx->sampler_view[i])
resource_read(ctx, ctx->sampler_view[i]->texture);
ctx->stats.prims_emitted += u_reduced_prims_for_vertices(info->mode, info->count);
ctx->stats.draw_calls++;
/* Update state for this draw operation */
etna_update_state_for_draw(ctx, info);
/* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */
etna_emit_state(ctx);
- if (info->indexed)
+ if (info->index_size)
etna_draw_indexed_primitives(ctx->stream, draw_mode, info->start, prims, info->index_bias);
else
etna_draw_primitives(ctx->stream, draw_mode, info->start, prims);
if (DBG_ENABLED(ETNA_DBG_DRAW_STALL)) {
/* Stall the FE after every draw operation. This allows better
* debug of GPU hang conditions, as the FE will indicate which
* draw op has caused the hang. */
etna_stall(ctx->stream, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
}
if (DBG_ENABLED(ETNA_DBG_FLUSH_ALL))
pctx->flush(pctx, NULL, 0);
if (ctx->framebuffer.cbuf)
etna_resource(ctx->framebuffer.cbuf->texture)->seqno++;
if (ctx->framebuffer.zsbuf)
etna_resource(ctx->framebuffer.zsbuf->texture)->seqno++;
- if (info->indexed && ibuffer_saved.user_buffer)
- pctx->set_index_buffer(pctx, &ibuffer_saved);
+ if (info->index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
}
static void
etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
enum pipe_flush_flags flags)
{
struct etna_context *ctx = etna_context(pctx);
int out_fence_fd = -1;
etna_cmd_stream_flush2(ctx->stream, ctx->in_fence_fd,
diff --git a/src/gallium/drivers/etnaviv/etnaviv_context.h b/src/gallium/drivers/etnaviv/etnaviv_context.h
index 56b57b5..2bb8cf5 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_context.h
+++ b/src/gallium/drivers/etnaviv/etnaviv_context.h
@@ -37,21 +37,20 @@
#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "util/slab.h"
struct pipe_screen;
struct etna_shader_variant;
struct etna_index_buffer {
- struct pipe_index_buffer ib;
struct etna_reloc FE_INDEX_STREAM_BASE_ADDR;
uint32_t FE_INDEX_STREAM_CONTROL;
uint32_t FE_PRIMITIVE_RESTART_INDEX;
};
struct etna_shader_input {
int vs_reg; /* VS input register */
};
enum etna_varying_special {
diff --git a/src/gallium/drivers/etnaviv/etnaviv_emit.c b/src/gallium/drivers/etnaviv/etnaviv_emit.c
index 7ced5fc..81aaca9 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_emit.c
+++ b/src/gallium/drivers/etnaviv/etnaviv_emit.c
@@ -362,22 +362,21 @@ etna_emit_state(struct etna_context *ctx)
/* multi sample config is set first, and outside of the normal sorting
* order, as changing the multisample state clobbers PS.INPUT_COUNT (and
* possibly PS.TEMP_REGISTER_CONTROL).
*/
if (unlikely(dirty & (ETNA_DIRTY_FRAMEBUFFER | ETNA_DIRTY_SAMPLE_MASK))) {
uint32_t val = VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(ctx->sample_mask);
val |= ctx->framebuffer.GL_MULTI_SAMPLE_CONFIG;
/*03818*/ EMIT_STATE(GL_MULTI_SAMPLE_CONFIG, val);
}
- if (likely(dirty & (ETNA_DIRTY_INDEX_BUFFER)) &&
- ctx->index_buffer.ib.buffer) {
+ if (likely(dirty & (ETNA_DIRTY_INDEX_BUFFER))) {
/*00644*/ EMIT_STATE_RELOC(FE_INDEX_STREAM_BASE_ADDR, &ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR);
/*00648*/ EMIT_STATE(FE_INDEX_STREAM_CONTROL, ctx->index_buffer.FE_INDEX_STREAM_CONTROL);
}
if (likely(dirty & (ETNA_DIRTY_VERTEX_BUFFERS))) {
/*0064C*/ EMIT_STATE_RELOC(FE_VERTEX_STREAM_BASE_ADDR, &ctx->vertex_buffer.cvb[0].FE_VERTEX_STREAM_BASE_ADDR);
/*00650*/ EMIT_STATE(FE_VERTEX_STREAM_CONTROL, ctx->vertex_buffer.cvb[0].FE_VERTEX_STREAM_CONTROL);
}
if (likely(dirty & (ETNA_DIRTY_INDEX_BUFFER))) {
/*00674*/ EMIT_STATE(FE_PRIMITIVE_RESTART_INDEX, ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX);
}
diff --git a/src/gallium/drivers/etnaviv/etnaviv_state.c b/src/gallium/drivers/etnaviv/etnaviv_state.c
index dcc587d..cd9f974 100644
--- a/src/gallium/drivers/etnaviv/etnaviv_state.c
+++ b/src/gallium/drivers/etnaviv/etnaviv_state.c
@@ -440,48 +440,20 @@ etna_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
} else {
cs->FE_VERTEX_STREAM_BASE_ADDR.bo = NULL;
cs->FE_VERTEX_STREAM_CONTROL = 0;
}
}
ctx->dirty |= ETNA_DIRTY_VERTEX_BUFFERS;
}
static void
-etna_set_index_buffer(struct pipe_context *pctx, const struct pipe_index_buffer *ib)
-{
- struct etna_context *ctx = etna_context(pctx);
- uint32_t ctrl;
-
- if (ib) {
- pipe_resource_reference(&ctx->index_buffer.ib.buffer, ib->buffer);
- memcpy(&ctx->index_buffer.ib, ib, sizeof(ctx->index_buffer.ib));
- ctrl = translate_index_size(ctx->index_buffer.ib.index_size);
- } else {
- pipe_resource_reference(&ctx->index_buffer.ib.buffer, NULL);
- ctrl = 0;
- }
-
- if (ctx->index_buffer.ib.buffer && ctrl != ETNA_NO_MATCH) {
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(ctx->index_buffer.ib.buffer)->bo;
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = ctx->index_buffer.ib.offset;
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
- ctx->index_buffer.FE_INDEX_STREAM_CONTROL = ctrl;
- } else {
- ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = NULL;
- ctx->index_buffer.FE_INDEX_STREAM_CONTROL = 0;
- }
-
- ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
-}
-
-static void
etna_blend_state_bind(struct pipe_context *pctx, void *bs)
{
struct etna_context *ctx = etna_context(pctx);
ctx->blend = bs;
ctx->dirty |= ETNA_DIRTY_BLEND;
}
static void
etna_blend_state_delete(struct pipe_context *pctx, void *bs)
@@ -645,21 +617,20 @@ etna_state_init(struct pipe_context *pctx)
pctx->set_stencil_ref = etna_set_stencil_ref;
pctx->set_clip_state = etna_set_clip_state;
pctx->set_sample_mask = etna_set_sample_mask;
pctx->set_constant_buffer = etna_set_constant_buffer;
pctx->set_framebuffer_state = etna_set_framebuffer_state;
pctx->set_polygon_stipple = etna_set_polygon_stipple;
pctx->set_scissor_states = etna_set_scissor_states;
pctx->set_viewport_states = etna_set_viewport_states;
pctx->set_vertex_buffers = etna_set_vertex_buffers;
- pctx->set_index_buffer = etna_set_index_buffer;
pctx->bind_blend_state = etna_blend_state_bind;
pctx->delete_blend_state = etna_blend_state_delete;
pctx->bind_rasterizer_state = etna_rasterizer_state_bind;
pctx->delete_rasterizer_state = etna_rasterizer_state_delete;
pctx->bind_depth_stencil_alpha_state = etna_zsa_state_bind;
pctx->delete_depth_stencil_alpha_state = etna_zsa_state_delete;
diff --git a/src/gallium/drivers/freedreno/a2xx/fd2_draw.c b/src/gallium/drivers/freedreno/a2xx/fd2_draw.c
index eeae10b..4f31619 100644
--- a/src/gallium/drivers/freedreno/a2xx/fd2_draw.c
+++ b/src/gallium/drivers/freedreno/a2xx/fd2_draw.c
@@ -73,21 +73,22 @@ emit_vertexbufs(struct fd_context *ctx)
bufs[i].prsc = vb->buffer.resource;
}
// NOTE I believe the 0x78 (or 0x9c in solid_vp) relates to the
// CONST(20,0) (or CONST(26,0) in solid_vp)
fd2_emit_vertex_bufs(ctx->batch->draw, 0x78, bufs, vtx->num_elements);
}
static bool
-fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd_ringbuffer *ring = ctx->batch->draw;
if (ctx->dirty & FD_DIRTY_VTXBUF)
emit_vertexbufs(ctx);
fd2_emit_state(ctx, ctx->dirty);
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, CP_REG(REG_A2XX_VGT_INDX_OFFSET));
diff --git a/src/gallium/drivers/freedreno/a3xx/fd3_draw.c b/src/gallium/drivers/freedreno/a3xx/fd3_draw.c
index b3e42f3..2703224 100644
--- a/src/gallium/drivers/freedreno/a3xx/fd3_draw.c
+++ b/src/gallium/drivers/freedreno/a3xx/fd3_draw.c
@@ -65,21 +65,21 @@ draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
if (emit->dirty & (FD_DIRTY_VTXBUF | FD_DIRTY_VTXSTATE))
fd3_emit_vertex_bufs(ring, emit);
OUT_PKT0(ring, REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, 1);
OUT_RING(ring, 0x0000000b); /* PC_VERTEX_REUSE_BLOCK_CNTL */
OUT_PKT0(ring, REG_A3XX_VFD_INDEX_MIN, 4);
OUT_RING(ring, add_sat(info->min_index, info->index_bias)); /* VFD_INDEX_MIN */
OUT_RING(ring, add_sat(info->max_index, info->index_bias)); /* VFD_INDEX_MAX */
OUT_RING(ring, info->start_instance); /* VFD_INSTANCEID_OFFSET */
- OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
+ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_PKT0(ring, REG_A3XX_PC_RESTART_INDEX, 1);
OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
info->restart_index : 0xffffffff);
/* points + psize -> spritelist: */
if (ctx->rasterizer->point_size_per_vertex &&
fd3_emit_get_vp(emit)->writes_psize &&
(info->mode == PIPE_PRIM_POINTS))
primtype = DI_PT_POINTLIST_PSIZE;
@@ -108,21 +108,22 @@ fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
if (ir3_shader_key_changes_vs(last_key, key)) {
ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
ctx->dirty |= FD_DIRTY_PROG;
}
fd3_ctx->last_key = *key;
}
}
static bool
-fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd3_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd3_emit emit = {
.debug = &ctx->debug,
.vtx = &ctx->vtx,
.prog = &ctx->prog,
.info = info,
.key = {
.color_two_side = ctx->rasterizer->light_twoside,
.vclamp_color = ctx->rasterizer->clamp_vertex_color,
diff --git a/src/gallium/drivers/freedreno/a3xx/fd3_emit.c b/src/gallium/drivers/freedreno/a3xx/fd3_emit.c
index 3b61cac..aefbbea 100644
--- a/src/gallium/drivers/freedreno/a3xx/fd3_emit.c
+++ b/src/gallium/drivers/freedreno/a3xx/fd3_emit.c
@@ -615,21 +615,21 @@ fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
uint32_t val = fd3_rasterizer_stateobj(ctx->rasterizer)
->pc_prim_vtx_cntl;
if (!emit->key.binning_pass) {
uint32_t stride_in_vpc = align(fp->total_in, 4) / 4;
if (stride_in_vpc > 0)
stride_in_vpc = MAX2(stride_in_vpc, 2);
val |= A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(stride_in_vpc);
}
- if (info->indexed && info->primitive_restart) {
+ if (info->index_size && info->primitive_restart) {
val |= A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
}
val |= COND(vp->writes_psize, A3XX_PC_PRIM_VTX_CNTL_PSIZE);
OUT_PKT0(ring, REG_A3XX_PC_PRIM_VTX_CNTL, 1);
OUT_RING(ring, val);
}
if (dirty & (FD_DIRTY_SCISSOR | FD_DIRTY_RASTERIZER | FD_DIRTY_VIEWPORT)) {
diff --git a/src/gallium/drivers/freedreno/a4xx/fd4_draw.c b/src/gallium/drivers/freedreno/a4xx/fd4_draw.c
index a76f9e8..840e917 100644
--- a/src/gallium/drivers/freedreno/a4xx/fd4_draw.c
+++ b/src/gallium/drivers/freedreno/a4xx/fd4_draw.c
@@ -37,47 +37,47 @@
#include "fd4_draw.h"
#include "fd4_context.h"
#include "fd4_emit.h"
#include "fd4_program.h"
#include "fd4_format.h"
#include "fd4_zsa.h"
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd4_emit *emit)
+ struct fd4_emit *emit, unsigned index_offset)
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
fd4_emit_state(ctx, ring, emit);
if (emit->dirty & (FD_DIRTY_VTXBUF | FD_DIRTY_VTXSTATE))
fd4_emit_vertex_bufs(ring, emit);
OUT_PKT0(ring, REG_A4XX_VFD_INDEX_OFFSET, 2);
- OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
+ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */
OUT_PKT0(ring, REG_A4XX_PC_RESTART_INDEX, 1);
OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
info->restart_index : 0xffffffff);
/* points + psize -> spritelist: */
if (ctx->rasterizer->point_size_per_vertex &&
fd4_emit_get_vp(emit)->writes_psize &&
(info->mode == PIPE_PRIM_POINTS))
primtype = DI_PT_POINTLIST_PSIZE;
fd4_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
- info);
+ info, index_offset);
}
/* fixup dirty shader state in case some "unrelated" (from the state-
* tracker's perspective) state change causes us to switch to a
* different variant.
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
@@ -92,21 +92,22 @@ fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
if (ir3_shader_key_changes_vs(last_key, key)) {
ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
ctx->dirty |= FD_DIRTY_PROG;
}
fd4_ctx->last_key = *key;
}
}
static bool
-fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd4_emit emit = {
.debug = &ctx->debug,
.vtx = &ctx->vtx,
.prog = &ctx->prog,
.info = info,
.key = {
.color_two_side = ctx->rasterizer->light_twoside,
.vclamp_color = ctx->rasterizer->clamp_vertex_color,
@@ -146,36 +147,36 @@ fd4_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
struct fd_ringbuffer *ring = ctx->batch->draw;
if (ctx->rasterizer->rasterizer_discard) {
fd_wfi(ctx->batch, ring);
OUT_PKT3(ring, CP_REG_RMW, 3);
OUT_RING(ring, REG_A4XX_RB_RENDER_CONTROL);
OUT_RING(ring, ~A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE);
OUT_RING(ring, A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE);
}
- draw_impl(ctx, ctx->batch->draw, &emit);
+ draw_impl(ctx, ctx->batch->draw, &emit, index_offset);
if (ctx->rasterizer->rasterizer_discard) {
fd_wfi(ctx->batch, ring);
OUT_PKT3(ring, CP_REG_RMW, 3);
OUT_RING(ring, REG_A4XX_RB_RENDER_CONTROL);
OUT_RING(ring, ~A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE);
OUT_RING(ring, 0);
}
/* and now binning pass: */
emit.key.binning_pass = true;
emit.dirty = dirty & ~(FD_DIRTY_BLEND);
emit.vp = NULL; /* we changed key so need to refetch vp */
emit.fp = NULL;
- draw_impl(ctx, ctx->batch->binning, &emit);
+ draw_impl(ctx, ctx->batch->binning, &emit, index_offset);
fd_context_all_clean(ctx);
return true;
}
void
fd4_draw_init(struct pipe_context *pctx)
{
struct fd_context *ctx = fd_context(pctx);
diff --git a/src/gallium/drivers/freedreno/a4xx/fd4_draw.h b/src/gallium/drivers/freedreno/a4xx/fd4_draw.h
index 634b64b..950675f 100644
--- a/src/gallium/drivers/freedreno/a4xx/fd4_draw.h
+++ b/src/gallium/drivers/freedreno/a4xx/fd4_draw.h
@@ -97,36 +97,35 @@ fd4_size2indextype(unsigned index_size)
}
DBG("unsupported index size: %d", index_size);
assert(0);
return INDEX4_SIZE_32_BIT;
}
static inline void
fd4_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct pipe_resource *idx_buffer = NULL;
enum a4xx_index_size idx_type;
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
- if (info->indexed) {
- struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+ if (info->index_size) {
+ assert(!info->has_user_indices);
- assert(!idx->user_buffer);
-
- idx_buffer = idx->buffer;
- idx_type = fd4_size2indextype(idx->index_size);
- idx_size = idx->index_size * info->count;
- idx_offset = idx->offset + (info->start * idx->index_size);
+ idx_buffer = info->index.resource;
+ idx_type = fd4_size2indextype(info->index_size);
+ idx_size = info->index_size * info->count;
+ idx_offset = index_offset + info->start * info->index_size;
src_sel = DI_SRC_SEL_DMA;
} else {
idx_buffer = NULL;
idx_type = INDEX4_SIZE_32_BIT;
idx_size = 0;
idx_offset = 0;
src_sel = DI_SRC_SEL_AUTO_INDEX;
}
fd4_draw(batch, ring, primtype, vismode, src_sel,
diff --git a/src/gallium/drivers/freedreno/a4xx/fd4_emit.c b/src/gallium/drivers/freedreno/a4xx/fd4_emit.c
index ba024bd..0f7c647 100644
--- a/src/gallium/drivers/freedreno/a4xx/fd4_emit.c
+++ b/src/gallium/drivers/freedreno/a4xx/fd4_emit.c
@@ -593,21 +593,21 @@ fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
* state object, we need to make sure that we always emit
* PRIM_VTX_CNTL.. either that or be more clever and detect
* when it changes.
*/
if (emit->info) {
const struct pipe_draw_info *info = emit->info;
struct fd4_rasterizer_stateobj *rast =
fd4_rasterizer_stateobj(ctx->rasterizer);
uint32_t val = rast->pc_prim_vtx_cntl;
- if (info->indexed && info->primitive_restart)
+ if (info->index_size && info->primitive_restart)
val |= A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;
val |= COND(vp->writes_psize, A4XX_PC_PRIM_VTX_CNTL_PSIZE);
if (fp->total_in > 0) {
uint32_t varout = align(fp->total_in, 16) / 16;
if (varout > 1)
varout = align(varout, 2);
val |= A4XX_PC_PRIM_VTX_CNTL_VAROUT(varout);
}
diff --git a/src/gallium/drivers/freedreno/a5xx/fd5_draw.c b/src/gallium/drivers/freedreno/a5xx/fd5_draw.c
index 4ef0c73..5c73573 100644
--- a/src/gallium/drivers/freedreno/a5xx/fd5_draw.c
+++ b/src/gallium/drivers/freedreno/a5xx/fd5_draw.c
@@ -35,42 +35,42 @@
#include "fd5_draw.h"
#include "fd5_context.h"
#include "fd5_emit.h"
#include "fd5_program.h"
#include "fd5_format.h"
#include "fd5_zsa.h"
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd5_emit *emit)
+ struct fd5_emit *emit, unsigned index_offset)
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
fd5_emit_state(ctx, ring, emit);
if (emit->dirty & (FD_DIRTY_VTXBUF | FD_DIRTY_VTXSTATE))
fd5_emit_vertex_bufs(ring, emit);
OUT_PKT4(ring, REG_A5XX_VFD_INDEX_OFFSET, 2);
- OUT_RING(ring, info->indexed ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
+ OUT_RING(ring, info->index_size ? info->index_bias : info->start); /* VFD_INDEX_OFFSET */
OUT_RING(ring, info->start_instance); /* ??? UNKNOWN_2209 */
OUT_PKT4(ring, REG_A5XX_PC_RESTART_INDEX, 1);
OUT_RING(ring, info->primitive_restart ? /* PC_RESTART_INDEX */
info->restart_index : 0xffffffff);
fd5_emit_render_cntl(ctx, false);
fd5_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
- info);
+ info, index_offset);
}
/* fixup dirty shader state in case some "unrelated" (from the state-
* tracker's perspective) state change causes us to switch to a
* different variant.
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
@@ -85,21 +85,22 @@ fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
if (ir3_shader_key_changes_vs(last_key, key)) {
ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
ctx->dirty |= FD_DIRTY_PROG;
}
fd5_ctx->last_key = *key;
}
}
static bool
-fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
+fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct fd5_emit emit = {
.debug = &ctx->debug,
.vtx = &ctx->vtx,
.prog = &ctx->prog,
.info = info,
.key = {
.color_two_side = ctx->rasterizer->light_twoside,
.vclamp_color = ctx->rasterizer->clamp_vertex_color,
@@ -129,21 +130,21 @@ fd5_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *info)
unsigned dirty = ctx->dirty;
/* do regular pass first, since that is more likely to fail compiling: */
if (!(fd5_emit_get_vp(&emit) && fd5_emit_get_fp(&emit)))
return false;
emit.key.binning_pass = false;
emit.dirty = dirty;
- draw_impl(ctx, ctx->batch->draw, &emit);
+ draw_impl(ctx, ctx->batch->draw, &emit, index_offset);
// /* and now binning pass: */
// emit.key.binning_pass = true;
// emit.dirty = dirty & ~(FD_DIRTY_BLEND);
// emit.vp = NULL; /* we changed key so need to refetch vp */
// emit.fp = NULL;
// draw_impl(ctx, ctx->batch->binning, &emit);
if (emit.streamout_mask) {
struct fd_ringbuffer *ring = ctx->batch->draw;
diff --git a/src/gallium/drivers/freedreno/a5xx/fd5_draw.h b/src/gallium/drivers/freedreno/a5xx/fd5_draw.h
index 8ce70d3..5baf167 100644
--- a/src/gallium/drivers/freedreno/a5xx/fd5_draw.h
+++ b/src/gallium/drivers/freedreno/a5xx/fd5_draw.h
@@ -73,36 +73,35 @@ fd5_draw(struct fd_batch *batch, struct fd_ringbuffer *ring,
emit_marker5(ring, 7);
fd_reset_wfi(batch);
}
static inline void
fd5_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ unsigned index_offset)
{
struct pipe_resource *idx_buffer = NULL;
enum a4xx_index_size idx_type;
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
- if (info->indexed) {
- struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+ if (info->index_size) {
+ assert(!info->has_user_indices);
- assert(!idx->user_buffer);
-
- idx_buffer = idx->buffer;
- idx_type = fd4_size2indextype(idx->index_size);
- idx_size = idx->index_size * info->count;
- idx_offset = idx->offset + (info->start * idx->index_size);
+ idx_buffer = info->index.resource;
+ idx_type = fd4_size2indextype(info->index_size);
+ idx_size = info->index_size * info->count;
+ idx_offset = index_offset + info->start * info->index_size;
src_sel = DI_SRC_SEL_DMA;
} else {
idx_buffer = NULL;
idx_type = INDEX4_SIZE_32_BIT;
idx_size = 0;
idx_offset = 0;
src_sel = DI_SRC_SEL_AUTO_INDEX;
}
fd5_draw(batch, ring, primtype, vismode, src_sel,
diff --git a/src/gallium/drivers/freedreno/freedreno_context.h b/src/gallium/drivers/freedreno/freedreno_context.h
index b50e66c..f8093ce 100644
--- a/src/gallium/drivers/freedreno/freedreno_context.h
+++ b/src/gallium/drivers/freedreno/freedreno_context.h
@@ -113,21 +113,21 @@ enum fd_dirty_3d_state {
FD_DIRTY_RASTERIZER = BIT(1),
FD_DIRTY_ZSA = BIT(2),
FD_DIRTY_BLEND_COLOR = BIT(3),
FD_DIRTY_STENCIL_REF = BIT(4),
FD_DIRTY_SAMPLE_MASK = BIT(5),
FD_DIRTY_FRAMEBUFFER = BIT(6),
FD_DIRTY_STIPPLE = BIT(7),
FD_DIRTY_VIEWPORT = BIT(8),
FD_DIRTY_VTXSTATE = BIT(9),
FD_DIRTY_VTXBUF = BIT(10),
- FD_DIRTY_INDEXBUF = BIT(11),
+
FD_DIRTY_SCISSOR = BIT(12),
FD_DIRTY_STREAMOUT = BIT(13),
FD_DIRTY_UCP = BIT(14),
FD_DIRTY_BLEND_DUAL = BIT(15),
/* These are a bit redundant with fd_dirty_shader_state, and possibly
* should be removed. (But OTOH kinda convenient in some places)
*/
FD_DIRTY_PROG = BIT(16),
FD_DIRTY_CONST = BIT(17),
@@ -257,21 +257,20 @@ struct fd_context {
struct fd_program_stateobj prog;
struct fd_vertex_state vtx;
struct pipe_blend_color blend_color;
struct pipe_stencil_ref stencil_ref;
unsigned sample_mask;
struct pipe_poly_stipple stipple;
struct pipe_viewport_state viewport;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
- struct pipe_index_buffer indexbuf;
struct fd_streamout_stateobj streamout;
struct pipe_clip_state ucp;
struct pipe_query *cond_query;
bool cond_cond; /* inverted rendering condition */
uint cond_mode;
struct pipe_debug_callback debug;
/* GMEM/tile handling fxns: */
@@ -280,21 +279,22 @@ struct fd_context {
void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
/* optional, for GMEM bypass: */
void (*emit_sysmem_prep)(struct fd_batch *batch);
void (*emit_sysmem_fini)(struct fd_batch *batch);
/* draw: */
- bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info);
+ bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset);
void (*clear)(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil);
/* constant emit: (note currently not used/needed for a2xx) */
void (*emit_const)(struct fd_ringbuffer *ring, enum shader_t type,
uint32_t regid, uint32_t offset, uint32_t sizedwords,
const uint32_t *dwords, struct pipe_resource *prsc);
/* emit bo addresses as constant: */
void (*emit_const_bo)(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);
diff --git a/src/gallium/drivers/freedreno/freedreno_draw.c b/src/gallium/drivers/freedreno/freedreno_draw.c
index 61e0bfb..7f04931 100644
--- a/src/gallium/drivers/freedreno/freedreno_draw.c
+++ b/src/gallium/drivers/freedreno/freedreno_draw.c
@@ -78,31 +78,30 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
}
/* TODO: push down the region versions into the tiles */
if (!fd_render_condition_check(pctx))
return;
/* emulate unsupported primitives: */
if (!fd_supported_prim(ctx, info->mode)) {
if (ctx->streamout.num_targets > 0)
debug_error("stream-out with emulated prims");
- util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
util_primconvert_draw_vbo(ctx->primconvert, info);
return;
}
/* Upload a user index buffer. */
- struct pipe_index_buffer ibuffer_saved = {};
- if (info->indexed && ctx->indexbuf.user_buffer &&
- !util_save_and_upload_index_buffer(pctx, info, &ctx->indexbuf,
- &ibuffer_saved)) {
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
+ unsigned index_offset = 0;
+ if (info->index_size && info->has_user_indices &&
+ !util_upload_index_buffer(pctx, info, &indexbuf, &index_offset)) {
return;
}
if (ctx->in_blit) {
fd_batch_reset(batch);
fd_context_all_dirty(ctx);
}
batch->blit = ctx->in_blit;
batch->back_blit = ctx->in_shadow;
@@ -156,21 +155,21 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
/* Mark VBOs as being read */
foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
}
/* Mark index buffer as being read */
- resource_read(batch, ctx->indexbuf.buffer);
+ resource_read(batch, indexbuf);
/* Mark textures as being read */
foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
/* Mark streamout buffers as being written.. */
for (i = 0; i < ctx->streamout.num_targets; i++)
if (ctx->streamout.targets[i])
@@ -202,33 +201,32 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
/* any buffers that haven't been cleared yet, we need to restore: */
batch->restore |= buffers & (FD_BUFFER_ALL & ~batch->cleared);
/* and any buffers used, need to be resolved: */
batch->resolve |= buffers;
DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
pfb->width, pfb->height, batch->num_draws,
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
- if (ctx->draw_vbo(ctx, info))
+ if (ctx->draw_vbo(ctx, info, index_offset))
batch->needs_flush = true;
for (i = 0; i < ctx->streamout.num_targets; i++)
ctx->streamout.offsets[i] += info->count;
if (fd_mesa_debug & FD_DBG_DDRAW)
fd_context_all_dirty(ctx);
fd_batch_check_size(batch);
-
- if (info->indexed && ibuffer_saved.user_buffer)
- pctx->set_index_buffer(pctx, &ibuffer_saved);
+ if (info->index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
}
/* Generic clear implementation (partially) using u_blitter: */
static void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
struct blitter_context *blitter = ctx->blitter;
@@ -273,21 +271,21 @@ fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
pctx->bind_fs_state(pctx, ctx->solid_prog.fp);
struct pipe_draw_info info = {
.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
.count = 2,
.max_index = 1,
.instance_count = 1,
};
- ctx->draw_vbo(ctx, &info);
+ ctx->draw_vbo(ctx, &info, 0);
util_blitter_restore_constant_buffer_state(blitter);
util_blitter_restore_vertex_states(blitter);
util_blitter_restore_fragment_states(blitter);
util_blitter_restore_textures(blitter);
util_blitter_restore_fb_state(blitter);
util_blitter_restore_render_cond(blitter);
util_blitter_unset_running_flag(blitter);
fd_blitter_pipe_end(ctx);
diff --git a/src/gallium/drivers/freedreno/freedreno_draw.h b/src/gallium/drivers/freedreno/freedreno_draw.h
index 18a5037..f2163bb 100644
--- a/src/gallium/drivers/freedreno/freedreno_draw.h
+++ b/src/gallium/drivers/freedreno/freedreno_draw.h
@@ -115,29 +115,27 @@ static inline void
fd_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
const struct pipe_draw_info *info)
{
struct pipe_resource *idx_buffer = NULL;
enum pc_di_index_size idx_type = INDEX_SIZE_IGN;
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
- if (info->indexed) {
- struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+ if (info->index_size) {
+ assert(!info->has_user_indices);
- assert(!idx->user_buffer);
-
- idx_buffer = idx->buffer;
- idx_type = size2indextype(idx->index_size);
- idx_size = idx->index_size * info->count;
- idx_offset = idx->offset + (info->start * idx->index_size);
+ idx_buffer = info->index.resource;
+ idx_type = size2indextype(info->index_size);
+ idx_size = info->index_size * info->count;
+ idx_offset = info->start * info->index_size;
src_sel = DI_SRC_SEL_DMA;
} else {
idx_buffer = NULL;
idx_type = INDEX_SIZE_IGN;
idx_size = 0;
idx_offset = 0;
src_sel = DI_SRC_SEL_AUTO_INDEX;
}
fd_draw(batch, ring, primtype, vismode, src_sel,
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c b/src/gallium/drivers/freedreno/freedreno_resource.c
index eeeae07..863379c 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -55,24 +55,20 @@ fd_invalidate_resource(struct fd_context *ctx, struct pipe_resource *prsc)
* anywhere. If it is, mark the relevant state as dirty. This is called on
* realloc_bo.
*/
/* VBOs */
for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
ctx->dirty |= FD_DIRTY_VTXBUF;
}
- /* Index buffer */
- if (ctx->indexbuf.buffer == prsc)
- ctx->dirty |= FD_DIRTY_INDEXBUF;
-
/* per-shader-stage resources: */
for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
/* Constbufs.. note that constbuf[0] is normal uniforms emitted in
* cmdstream rather than by pointer..
*/
const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
for (unsigned i = 1; i < num_ubos; i++) {
if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
break;
if (ctx->constbuf[stage].cb[i].buffer == prsc)
diff --git a/src/gallium/drivers/freedreno/freedreno_state.c b/src/gallium/drivers/freedreno/freedreno_state.c
index 18f012d..e40ee2d 100644
--- a/src/gallium/drivers/freedreno/freedreno_state.c
+++ b/src/gallium/drivers/freedreno/freedreno_state.c
@@ -222,38 +222,20 @@ fd_set_vertex_buffers(struct pipe_context *pctx,
}
}
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
so->count = util_last_bit(so->enabled_mask);
ctx->dirty |= FD_DIRTY_VTXBUF;
}
static void
-fd_set_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib)
-{
- struct fd_context *ctx = fd_context(pctx);
-
- if (ib) {
- pipe_resource_reference(&ctx->indexbuf.buffer, ib->buffer);
- ctx->indexbuf.index_size = ib->index_size;
- ctx->indexbuf.offset = ib->offset;
- ctx->indexbuf.user_buffer = ib->user_buffer;
- } else {
- pipe_resource_reference(&ctx->indexbuf.buffer, NULL);
- }
-
- ctx->dirty |= FD_DIRTY_INDEXBUF;
-}
-
-static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_blend_state *cso = hwcso;
bool old_is_dual = ctx->blend ?
ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
false;
bool new_is_dual = cso ?
cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
false;
@@ -410,21 +392,20 @@ fd_state_init(struct pipe_context *pctx)
pctx->set_stencil_ref = fd_set_stencil_ref;
pctx->set_clip_state = fd_set_clip_state;
pctx->set_sample_mask = fd_set_sample_mask;
pctx->set_constant_buffer = fd_set_constant_buffer;
pctx->set_framebuffer_state = fd_set_framebuffer_state;
pctx->set_polygon_stipple = fd_set_polygon_stipple;
pctx->set_scissor_states = fd_set_scissor_states;
pctx->set_viewport_states = fd_set_viewport_states;
pctx->set_vertex_buffers = fd_set_vertex_buffers;
- pctx->set_index_buffer = fd_set_index_buffer;
pctx->bind_blend_state = fd_blend_state_bind;
pctx->delete_blend_state = fd_blend_state_delete;
pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
pctx->delete_rasterizer_state = fd_rasterizer_state_delete;
pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_shader.c b/src/gallium/drivers/freedreno/ir3/ir3_shader.c
index 1d54d53..0fb0cd5 100644
--- a/src/gallium/drivers/freedreno/ir3/ir3_shader.c
+++ b/src/gallium/drivers/freedreno/ir3/ir3_shader.c
@@ -674,21 +674,21 @@ ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *rin
if (shader_dirty)
emit_immediates(ctx, v, ring);
}
/* emit driver params every time: */
/* TODO skip emit if shader doesn't use driver params to avoid WFI.. */
if (info) {
uint32_t offset = v->constbase.driver_param;
if (v->constlen > offset) {
uint32_t vertex_params[IR3_DP_COUNT] = {
- [IR3_DP_VTXID_BASE] = info->indexed ?
+ [IR3_DP_VTXID_BASE] = info->index_size ?
info->index_bias : info->start,
[IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
};
/* if no user-clip-planes, we don't need to emit the
* entire thing:
*/
uint32_t vertex_params_size = 4;
if (v->key.ucp_enables) {
struct pipe_clip_state *ucp = &ctx->ucp;
diff --git a/src/gallium/drivers/i915/i915_context.c b/src/gallium/drivers/i915/i915_context.c
index d229700..8ea9440 100644
--- a/src/gallium/drivers/i915/i915_context.c
+++ b/src/gallium/drivers/i915/i915_context.c
@@ -76,27 +76,27 @@ i915_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
const void *buf = i915->vertex_buffers[i].is_user_buffer ?
i915->vertex_buffers[i].buffer.user : NULL;
if (!buf)
buf = i915_buffer(i915->vertex_buffers[i].buffer.resource)->data;
draw_set_mapped_vertex_buffer(draw, i, buf, ~0);
}
/*
* Map index buffer, if present
*/
- if (info->indexed) {
- mapped_indices = i915->index_buffer.user_buffer;
+ if (info->index_size) {
+ mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices)
- mapped_indices = i915_buffer(i915->index_buffer.buffer)->data;
+ mapped_indices = i915_buffer(info->index.resource)->data;
draw_set_indexes(draw,
- (ubyte *) mapped_indices + i915->index_buffer.offset,
- i915->index_buffer.index_size, ~0);
+ (ubyte *) mapped_indices,
+ info->index_size, ~0);
}
if (i915->constants[PIPE_SHADER_VERTEX])
draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0,
i915_buffer(i915->constants[PIPE_SHADER_VERTEX])->data,
(i915->current.num_user_constants[PIPE_SHADER_VERTEX] *
4 * sizeof(float)));
else
draw_set_mapped_constant_buffer(draw, PIPE_SHADER_VERTEX, 0, NULL, 0);
diff --git a/src/gallium/drivers/i915/i915_context.h b/src/gallium/drivers/i915/i915_context.h
index ea13834..626a17f 100644
--- a/src/gallium/drivers/i915/i915_context.h
+++ b/src/gallium/drivers/i915/i915_context.h
@@ -242,21 +242,20 @@ struct i915_context {
struct pipe_blend_color blend_color;
struct pipe_stencil_ref stencil_ref;
struct pipe_clip_state clip;
struct pipe_resource *constants[PIPE_SHADER_TYPES];
struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple poly_stipple;
struct pipe_scissor_state scissor;
struct pipe_sampler_view *fragment_sampler_views[PIPE_MAX_SAMPLERS];
struct pipe_sampler_view *vertex_sampler_views[PIPE_MAX_SAMPLERS];
struct pipe_viewport_state viewport;
- struct pipe_index_buffer index_buffer;
unsigned dirty;
struct pipe_resource *mapped_vs_tex[PIPE_MAX_SAMPLERS];
struct i915_winsys_buffer* mapped_vs_tex_buffer[PIPE_MAX_SAMPLERS];
unsigned num_samplers;
unsigned num_fragment_sampler_views;
unsigned num_vertex_samplers;
unsigned num_vertex_sampler_views;
diff --git a/src/gallium/drivers/i915/i915_state.c b/src/gallium/drivers/i915/i915_state.c
index 3747922..ddc2709 100644
--- a/src/gallium/drivers/i915/i915_state.c
+++ b/src/gallium/drivers/i915/i915_state.c
@@ -1053,31 +1053,20 @@ i915_bind_vertex_elements_state(struct pipe_context *pipe,
i915_velems->count, i915_velems->velem);
}
}
static void
i915_delete_vertex_elements_state(struct pipe_context *pipe, void *velems)
{
FREE( velems );
}
-static void i915_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct i915_context *i915 = i915_context(pipe);
-
- if (ib)
- memcpy(&i915->index_buffer, ib, sizeof(i915->index_buffer));
- else
- memset(&i915->index_buffer, 0, sizeof(i915->index_buffer));
-}
-
static void
i915_set_sample_mask(struct pipe_context *pipe,
unsigned sample_mask)
{
}
void
i915_init_state_functions( struct i915_context *i915 )
{
i915->base.create_blend_state = i915_create_blend_state;
@@ -1112,12 +1101,11 @@ i915_init_state_functions( struct i915_context *i915 )
i915->base.set_constant_buffer = i915_set_constant_buffer;
i915->base.set_framebuffer_state = i915_set_framebuffer_state;
i915->base.set_polygon_stipple = i915_set_polygon_stipple;
i915->base.set_scissor_states = i915_set_scissor_states;
i915->base.set_sampler_views = i915_set_sampler_views;
i915->base.create_sampler_view = i915_create_sampler_view;
i915->base.sampler_view_destroy = i915_sampler_view_destroy;
i915->base.set_viewport_states = i915_set_viewport_states;
i915->base.set_vertex_buffers = i915_set_vertex_buffers;
- i915->base.set_index_buffer = i915_set_index_buffer;
}
diff --git a/src/gallium/drivers/llvmpipe/lp_context.h b/src/gallium/drivers/llvmpipe/lp_context.h
index d4bd02d..6f1c7d9 100644
--- a/src/gallium/drivers/llvmpipe/lp_context.h
+++ b/src/gallium/drivers/llvmpipe/lp_context.h
@@ -74,21 +74,20 @@ struct llvmpipe_context {
struct pipe_stencil_ref stencil_ref;
struct pipe_clip_state clip;
struct pipe_constant_buffer constants[PIPE_SHADER_TYPES][LP_MAX_TGSI_CONST_BUFFERS];
struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple poly_stipple;
struct pipe_scissor_state scissors[PIPE_MAX_VIEWPORTS];
struct pipe_sampler_view *sampler_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS];
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer index_buffer;
unsigned num_samplers[PIPE_SHADER_TYPES];
unsigned num_sampler_views[PIPE_SHADER_TYPES];
unsigned num_vertex_buffers;
struct draw_so_target *so_targets[PIPE_MAX_SO_BUFFERS];
int num_so_targets;
struct pipe_query_data_so_statistics so_stats;
diff --git a/src/gallium/drivers/llvmpipe/lp_draw_arrays.c b/src/gallium/drivers/llvmpipe/lp_draw_arrays.c
index 9a9c2f7..2efe3ef 100644
--- a/src/gallium/drivers/llvmpipe/lp_draw_arrays.c
+++ b/src/gallium/drivers/llvmpipe/lp_draw_arrays.c
@@ -80,34 +80,30 @@ llvmpipe_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
if (!lp->vertex_buffer[i].buffer.resource) {
continue;
}
buf = llvmpipe_resource_data(lp->vertex_buffer[i].buffer.resource);
size = lp->vertex_buffer[i].buffer.resource->width0;
}
draw_set_mapped_vertex_buffer(draw, i, buf, size);
}
/* Map index buffer, if present */
- if (info->indexed) {
+ if (info->index_size) {
unsigned available_space = ~0;
- mapped_indices = lp->index_buffer.user_buffer;
+ mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices) {
- mapped_indices = llvmpipe_resource_data(lp->index_buffer.buffer);
- if (lp->index_buffer.buffer->width0 > lp->index_buffer.offset)
- available_space =
- (lp->index_buffer.buffer->width0 - lp->index_buffer.offset);
- else
- available_space = 0;
+ mapped_indices = llvmpipe_resource_data(info->index.resource);
+ available_space = info->index.resource->width0;
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices + lp->index_buffer.offset,
- lp->index_buffer.index_size, available_space);
+ (ubyte *) mapped_indices,
+ info->index_size, available_space);
}
for (i = 0; i < lp->num_so_targets; i++) {
void *buf = 0;
if (lp->so_targets[i]) {
buf = llvmpipe_resource(lp->so_targets[i]->target.buffer)->data;
lp->so_targets[i]->mapping = buf;
}
}
draw_set_mapped_so_targets(draw, lp->num_so_targets,
diff --git a/src/gallium/drivers/llvmpipe/lp_state_vertex.c b/src/gallium/drivers/llvmpipe/lp_state_vertex.c
index 1e93fd8..702ecf9 100644
--- a/src/gallium/drivers/llvmpipe/lp_state_vertex.c
+++ b/src/gallium/drivers/llvmpipe/lp_state_vertex.c
@@ -86,32 +86,19 @@ llvmpipe_set_vertex_buffers(struct pipe_context *pipe,
util_set_vertex_buffers_count(llvmpipe->vertex_buffer,
&llvmpipe->num_vertex_buffers,
buffers, start_slot, count);
llvmpipe->dirty |= LP_NEW_VERTEX;
draw_set_vertex_buffers(llvmpipe->draw, start_slot, count, buffers);
}
-static void
-llvmpipe_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
-
- if (ib)
- memcpy(&llvmpipe->index_buffer, ib, sizeof(llvmpipe->index_buffer));
- else
- memset(&llvmpipe->index_buffer, 0, sizeof(llvmpipe->index_buffer));
-}
-
void
llvmpipe_init_vertex_funcs(struct llvmpipe_context *llvmpipe)
{
llvmpipe->pipe.create_vertex_elements_state = llvmpipe_create_vertex_elements_state;
llvmpipe->pipe.bind_vertex_elements_state = llvmpipe_bind_vertex_elements_state;
llvmpipe->pipe.delete_vertex_elements_state = llvmpipe_delete_vertex_elements_state;
llvmpipe->pipe.set_vertex_buffers = llvmpipe_set_vertex_buffers;
- llvmpipe->pipe.set_index_buffer = llvmpipe_set_index_buffer;
}
diff --git a/src/gallium/drivers/noop/noop_state.c b/src/gallium/drivers/noop/noop_state.c
index 32a54e9..46d99ab 100644
--- a/src/gallium/drivers/noop/noop_state.c
+++ b/src/gallium/drivers/noop/noop_state.c
@@ -181,25 +181,20 @@ static void noop_surface_destroy(struct pipe_context *ctx,
static void noop_bind_state(struct pipe_context *ctx, void *state)
{
}
static void noop_delete_state(struct pipe_context *ctx, void *state)
{
FREE(state);
}
-static void noop_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
-}
-
static void noop_set_vertex_buffers(struct pipe_context *ctx,
unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *buffers)
{
}
static void *noop_create_vertex_elements(struct pipe_context *ctx,
unsigned count,
const struct pipe_vertex_element *state)
{
@@ -291,20 +286,19 @@ void noop_init_state_functions(struct pipe_context *ctx)
ctx->set_blend_color = noop_set_blend_color;
ctx->set_clip_state = noop_set_clip_state;
ctx->set_constant_buffer = noop_set_constant_buffer;
ctx->set_sampler_views = noop_set_sampler_views;
ctx->set_framebuffer_state = noop_set_framebuffer_state;
ctx->set_polygon_stipple = noop_set_polygon_stipple;
ctx->set_sample_mask = noop_set_sample_mask;
ctx->set_scissor_states = noop_set_scissor_states;
ctx->set_stencil_ref = noop_set_stencil_ref;
ctx->set_vertex_buffers = noop_set_vertex_buffers;
- ctx->set_index_buffer = noop_set_index_buffer;
ctx->set_viewport_states = noop_set_viewport_states;
ctx->sampler_view_destroy = noop_sampler_view_destroy;
ctx->surface_destroy = noop_surface_destroy;
ctx->draw_vbo = noop_draw_vbo;
ctx->launch_grid = noop_launch_grid;
ctx->create_stream_output_target = noop_create_stream_output_target;
ctx->stream_output_target_destroy = noop_stream_output_target_destroy;
ctx->set_stream_output_targets = noop_set_stream_output_targets;
}
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_context.c b/src/gallium/drivers/nouveau/nv30/nv30_context.c
index cec3cd0..e137525 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_context.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_context.c
@@ -116,27 +116,20 @@ nv30_invalidate_resource_storage(struct nouveau_context *nv,
if (res->bind & PIPE_BIND_VERTEX_BUFFER) {
for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (nv30->vtxbuf[i].buffer.resource == res) {
nv30->dirty |= NV30_NEW_ARRAYS;
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
if (!--ref)
return ref;
}
}
}
- if (res->bind & PIPE_BIND_INDEX_BUFFER) {
- if (nv30->idxbuf.buffer == res) {
- nouveau_bufctx_reset(nv30->bufctx, BUFCTX_IDXBUF);
- if (!--ref)
- return ref;
- }
- }
if (res->bind & PIPE_BIND_SAMPLER_VIEW) {
for (i = 0; i < nv30->fragprog.num_textures; ++i) {
if (nv30->fragprog.textures[i] &&
nv30->fragprog.textures[i]->texture == res) {
nv30->dirty |= NV30_NEW_FRAGTEX;
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_FRAGTEX(i));
if (!--ref)
return ref;
}
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_context.h b/src/gallium/drivers/nouveau/nv30/nv30_context.h
index 0ab2f95..1496b37 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_context.h
+++ b/src/gallium/drivers/nouveau/nv30/nv30_context.h
@@ -103,21 +103,20 @@ struct nv30_context {
struct pipe_stencil_ref stencil_ref;
struct pipe_poly_stipple stipple;
struct pipe_scissor_state scissor;
struct pipe_viewport_state viewport;
struct pipe_clip_state clip;
unsigned sample_mask;
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
- struct pipe_index_buffer idxbuf;
uint32_t vbo_fifo;
uint32_t vbo_user;
unsigned vbo_min_index;
unsigned vbo_max_index;
bool vbo_push_hint;
struct nouveau_heap *blit_vp;
struct pipe_resource *blit_fp;
struct pipe_query *render_cond_query;
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_draw.c b/src/gallium/drivers/nouveau/nv30/nv30_draw.c
index 28d3de9..4c587fc 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_draw.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_draw.c
@@ -423,37 +423,37 @@ nv30_render_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nv30->vtxbuf[i].buffer.user : NULL;
if (!map) {
if (nv30->vtxbuf[i].buffer.resource)
map = pipe_buffer_map(pipe, nv30->vtxbuf[i].buffer.resource,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transfer[i]);
}
draw_set_mapped_vertex_buffer(draw, i, map, ~0);
}
- if (info->indexed) {
- const void *map = nv30->idxbuf.user_buffer;
+ if (info->index_size) {
+ const void *map = info->has_user_indices ? info->index.user : NULL;
if (!map)
- map = pipe_buffer_map(pipe, nv30->idxbuf.buffer,
+ map = pipe_buffer_map(pipe, info->index.resource,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ, &transferi);
draw_set_indexes(draw,
- (ubyte *) map + nv30->idxbuf.offset,
- nv30->idxbuf.index_size, ~0);
+ (ubyte *) map,
+ info->index_size, ~0);
} else {
draw_set_indexes(draw, NULL, 0, 0);
}
draw_vbo(draw, info);
draw_flush(draw);
- if (info->indexed && transferi)
+ if (info->index_size && transferi)
pipe_buffer_unmap(pipe, transferi);
for (i = 0; i < nv30->num_vtxbufs; i++)
if (transfer[i])
pipe_buffer_unmap(pipe, transfer[i]);
nv30->draw_dirty = 0;
nv30_state_release(nv30);
}
static void
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_push.c b/src/gallium/drivers/nouveau/nv30/nv30_push.c
index 90adfa0..fc8520b 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_push.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_push.c
@@ -192,21 +192,21 @@ emit_vertices_seq(struct push_context *ctx, unsigned start, unsigned count)
count -= push;
start += push;
}
}
void
nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
{
struct push_context ctx;
unsigned i, index_size;
- bool apply_bias = info->indexed && info->index_bias;
+ bool apply_bias = info->index_size && info->index_bias;
ctx.push = nv30->base.pushbuf;
ctx.translate = nv30->vertex->translate;
ctx.packet_vertex_limit = nv30->vertex->vtx_per_packet_max;
ctx.vertex_words = nv30->vertex->vtx_size;
for (i = 0; i < nv30->num_vtxbufs; ++i) {
uint8_t *data;
struct pipe_vertex_buffer *vb = &nv30->vtxbuf[i];
struct nv04_resource *res = nv04_resource(vb->buffer.resource);
@@ -217,32 +217,32 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
data = nouveau_resource_map_offset(&nv30->base, res,
vb->buffer_offset, NOUVEAU_BO_RD);
if (apply_bias)
data += info->index_bias * vb->stride;
ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
}
- if (info->indexed) {
- if (nv30->idxbuf.buffer)
+ if (info->index_size) {
+ if (!info->has_user_indices)
ctx.idxbuf = nouveau_resource_map_offset(&nv30->base,
- nv04_resource(nv30->idxbuf.buffer), nv30->idxbuf.offset,
+ nv04_resource(info->index.resource), info->start * info->index_size,
NOUVEAU_BO_RD);
else
- ctx.idxbuf = nv30->idxbuf.user_buffer;
+ ctx.idxbuf = info->index.user;
if (!ctx.idxbuf) {
nv30_state_release(nv30);
return;
}
- index_size = nv30->idxbuf.index_size;
+ index_size = info->index_size;
ctx.primitive_restart = info->primitive_restart;
ctx.restart_index = info->restart_index;
} else {
ctx.idxbuf = NULL;
index_size = 0;
ctx.primitive_restart = false;
ctx.restart_index = 0;
}
if (nv30->screen->eng3d->oclass >= NV40_3D_CLASS) {
@@ -270,21 +270,21 @@ nv30_push_vbo(struct nv30_context *nv30, const struct pipe_draw_info *info)
case 4:
emit_vertices_i32(&ctx, info->start, info->count);
break;
default:
assert(0);
break;
}
BEGIN_NV04(ctx.push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (ctx.push, NV30_3D_VERTEX_BEGIN_END_STOP);
- if (info->indexed)
- nouveau_resource_unmap(nv04_resource(nv30->idxbuf.buffer));
+ if (info->index_size && !info->has_user_indices)
+ nouveau_resource_unmap(nv04_resource(info->index.resource));
for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (nv30->vtxbuf[i].buffer.resource) {
nouveau_resource_unmap(nv04_resource(nv30->vtxbuf[i].buffer.resource));
}
}
nv30_state_release(nv30);
}
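The hunks above and the nv50/nvc0 push paths further down all reduce to the same index-fetch pattern now that there is no separate index-buffer state. A minimal sketch of that pattern follows; map_index_data is only an illustrative name, not a helper added by this patch, and the nouveau calls are the ones already used in the hunks above:

/* Illustrative only: the recurring pattern, assuming the nouveau helpers
 * used above (nouveau_resource_map_offset, nv04_resource). */
static const void *
map_index_data(struct nouveau_context *nv, const struct pipe_draw_info *info)
{
   if (!info->index_size)
      return NULL;               /* index_size == 0 means a non-indexed draw */

   if (info->has_user_indices)
      return info->index.user;   /* user pointer supplied by the state tracker */

   /* pipe_index_buffer::offset is gone; the byte offset into the resource
    * is derived from pipe_draw_info::start instead. */
   return nouveau_resource_map_offset(nv, nv04_resource(info->index.resource),
                                      info->start * info->index_size,
                                      NOUVEAU_BO_RD);
}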
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_resource.c b/src/gallium/drivers/nouveau/nv30/nv30_resource.c
index d5842dd..ff34f6e 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_resource.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_resource.c
@@ -37,24 +37,20 @@ nv30_memory_barrier(struct pipe_context *pipe, unsigned flags)
struct nv30_context *nv30 = nv30_context(pipe);
int i;
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nv30->num_vtxbufs; ++i) {
if (!nv30->vtxbuf[i].buffer.resource)
continue;
if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv30->base.vbo_dirty = true;
}
-
- if (nv30->idxbuf.buffer &&
- nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv30->base.vbo_dirty = true;
}
}
static struct pipe_resource *
nv30_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl)
{
switch (tmpl->target) {
case PIPE_BUFFER:
return nouveau_buffer_create(pscreen, tmpl);
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_state.c b/src/gallium/drivers/nouveau/nv30/nv30_state.c
index 16b668b..2a81225 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_state.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_state.c
@@ -431,37 +431,20 @@ nv30_set_vertex_buffers(struct pipe_context *pipe,
struct nv30_context *nv30 = nv30_context(pipe);
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
util_set_vertex_buffers_count(nv30->vtxbuf, &nv30->num_vtxbufs,
vb, start_slot, count);
nv30->dirty |= NV30_NEW_ARRAYS;
}
-static void
-nv30_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct nv30_context *nv30 = nv30_context(pipe);
-
- if (ib) {
- pipe_resource_reference(&nv30->idxbuf.buffer, ib->buffer);
- nv30->idxbuf.index_size = ib->index_size;
- nv30->idxbuf.offset = ib->offset;
- nv30->idxbuf.user_buffer = ib->user_buffer;
- } else {
- pipe_resource_reference(&nv30->idxbuf.buffer, NULL);
- nv30->idxbuf.user_buffer = NULL;
- }
-}
-
void
nv30_state_init(struct pipe_context *pipe)
{
pipe->create_blend_state = nv30_blend_state_create;
pipe->bind_blend_state = nv30_blend_state_bind;
pipe->delete_blend_state = nv30_blend_state_delete;
pipe->create_rasterizer_state = nv30_rasterizer_state_create;
pipe->bind_rasterizer_state = nv30_rasterizer_state_bind;
pipe->delete_rasterizer_state = nv30_rasterizer_state_delete;
@@ -474,12 +457,11 @@ nv30_state_init(struct pipe_context *pipe)
pipe->set_stencil_ref = nv30_set_stencil_ref;
pipe->set_clip_state = nv30_set_clip_state;
pipe->set_sample_mask = nv30_set_sample_mask;
pipe->set_constant_buffer = nv30_set_constant_buffer;
pipe->set_framebuffer_state = nv30_set_framebuffer_state;
pipe->set_polygon_stipple = nv30_set_polygon_stipple;
pipe->set_scissor_states = nv30_set_scissor_states;
pipe->set_viewport_states = nv30_set_viewport_states;
pipe->set_vertex_buffers = nv30_set_vertex_buffers;
- pipe->set_index_buffer = nv30_set_index_buffer;
}
diff --git a/src/gallium/drivers/nouveau/nv30/nv30_vbo.c b/src/gallium/drivers/nouveau/nv30/nv30_vbo.c
index d049b55..bb0a8a0 100644
--- a/src/gallium/drivers/nouveau/nv30/nv30_vbo.c
+++ b/src/gallium/drivers/nouveau/nv30/nv30_vbo.c
@@ -452,38 +452,39 @@ nv30_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
BEGIN_NI04(push, NV30_3D(VB_ELEMENT_U16), npush);
while (npush--) {
PUSH_DATA (push, (map[1] << 16) | map[0]);
map += 2;
}
}
}
static void
nv30_draw_elements(struct nv30_context *nv30, bool shorten,
+ const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count,
- unsigned instance_count, int32_t index_bias)
+ unsigned instance_count, int32_t index_bias,
+ unsigned index_size)
{
- const unsigned index_size = nv30->idxbuf.index_size;
struct nouveau_pushbuf *push = nv30->base.pushbuf;
struct nouveau_object *eng3d = nv30->screen->eng3d;
unsigned prim = nv30_prim_gl(mode);
if (eng3d->oclass >= NV40_3D_CLASS && index_bias != nv30->state.index_bias) {
BEGIN_NV04(push, NV40_3D(VB_ELEMENT_BASE), 1);
PUSH_DATA (push, index_bias);
nv30->state.index_bias = index_bias;
}
if (eng3d->oclass == NV40_3D_CLASS && index_size > 1 &&
- nv30->idxbuf.buffer) {
- struct nv04_resource *res = nv04_resource(nv30->idxbuf.buffer);
- unsigned offset = nv30->idxbuf.offset;
+ !info->has_user_indices) {
+ struct nv04_resource *res = nv04_resource(info->index.resource);
+ unsigned offset = 0;
assert(nouveau_resource_mapped_by_gpu(&res->base));
BEGIN_NV04(push, NV30_3D(IDXBUF_OFFSET), 2);
PUSH_RESRC(push, NV30_3D(IDXBUF_OFFSET), BUFCTX_IDXBUF, res, offset,
NOUVEAU_BO_LOW | NOUVEAU_BO_RD, 0, 0);
PUSH_MTHD (push, NV30_3D(IDXBUF_FORMAT), BUFCTX_IDXBUF, res->bo,
(index_size == 2) ? 0x00000010 : 0x00000000,
res->domain | NOUVEAU_BO_RD,
0, NV30_3D_IDXBUF_FORMAT_DMA1);
@@ -504,26 +505,26 @@ nv30_draw_elements(struct nv30_context *nv30, bool shorten,
}
if (npush)
PUSH_DATA (push, ((npush - 1) << 24) | start);
}
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (push, NV30_3D_VERTEX_BEGIN_END_STOP);
PUSH_RESET(push, BUFCTX_IDXBUF);
} else {
const void *data;
- if (nv30->idxbuf.buffer)
+ if (!info->has_user_indices)
data = nouveau_resource_map_offset(&nv30->base,
- nv04_resource(nv30->idxbuf.buffer),
- nv30->idxbuf.offset, NOUVEAU_BO_RD);
+ nv04_resource(info->index.resource),
+ start * index_size, NOUVEAU_BO_RD);
else
- data = nv30->idxbuf.user_buffer;
+ data = info->index.user;
if (!data)
return;
BEGIN_NV04(push, NV30_3D(VERTEX_BEGIN_END), 1);
PUSH_DATA (push, prim);
switch (index_size) {
case 1:
nv30_draw_elements_inline_u08(push, data, start, count);
break;
case 2:
@@ -552,21 +553,21 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
int i;
if (!info->primitive_restart &&
!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
return;
/* For picking only a few vertices from a large user buffer, push is better,
* if index count is larger and we expect repeated vertices, suggest upload.
*/
nv30->vbo_push_hint = /* the 64 is heuristic */
- !(info->indexed &&
+ !(info->index_size &&
((info->max_index - info->min_index + 64) < info->count));
nv30->vbo_min_index = info->min_index;
nv30->vbo_max_index = info->max_index;
if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
nv30->dirty |= NV30_NEW_ARRAYS;
push->user_priv = &nv30->bufctx;
if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
@@ -582,31 +583,31 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
return;
}
for (i = 0; i < nv30->num_vtxbufs && !nv30->base.vbo_dirty; ++i) {
if (!nv30->vtxbuf[i].buffer.resource)
continue;
if (nv30->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv30->base.vbo_dirty = true;
}
- if (!nv30->base.vbo_dirty && nv30->idxbuf.buffer &&
- nv30->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ if (!nv30->base.vbo_dirty && info->index_size && !info->has_user_indices &&
+ info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv30->base.vbo_dirty = true;
if (nv30->base.vbo_dirty) {
BEGIN_NV04(push, NV30_3D(VTX_CACHE_INVALIDATE_1710), 1);
PUSH_DATA (push, 0);
nv30->base.vbo_dirty = false;
}
- if (!info->indexed) {
+ if (!info->index_size) {
nv30_draw_arrays(nv30,
info->mode, info->start, info->count,
info->instance_count);
} else {
bool shorten = info->max_index <= 65535;
if (info->primitive_restart != nv30->state.prim_restart) {
if (info->primitive_restart) {
BEGIN_NV04(push, NV40_3D(PRIM_RESTART_ENABLE), 2);
PUSH_DATA (push, 1);
@@ -621,23 +622,23 @@ nv30_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nv30->state.prim_restart = info->primitive_restart;
} else
if (info->primitive_restart) {
BEGIN_NV04(push, NV40_3D(PRIM_RESTART_INDEX), 1);
PUSH_DATA (push, info->restart_index);
if (info->restart_index > 65535)
shorten = false;
}
- nv30_draw_elements(nv30, shorten,
+ nv30_draw_elements(nv30, shorten, info,
info->mode, info->start, info->count,
- info->instance_count, info->index_bias);
+ info->instance_count, info->index_bias, info->index_size);
}
nv30_state_release(nv30);
nv30_release_user_vbufs(nv30);
}
void
nv30_vbo_init(struct pipe_context *pipe)
{
pipe->create_vertex_elements_state = nv30_vertex_state_create;
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_context.c b/src/gallium/drivers/nouveau/nv50/nv50_context.c
index d072927..d2c37ac 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_context.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_context.c
@@ -61,24 +61,20 @@ nv50_memory_barrier(struct pipe_context *pipe, unsigned flags)
int i, s;
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nv50->num_vtxbufs; ++i) {
if (!nv50->vtxbuf[i].buffer.resource && !nv50->vtxbuf[i].is_user_buffer)
continue;
if (nv50->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nv50->base.vbo_dirty = true;
}
- if (nv50->idxbuf.buffer &&
- nv50->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nv50->base.vbo_dirty = true;
-
for (s = 0; s < 3 && !nv50->cb_dirty; ++s) {
uint32_t valid = nv50->constbuf_valid[s];
while (valid && !nv50->cb_dirty) {
const unsigned i = ffs(valid) - 1;
struct pipe_resource *res;
valid &= ~(1 << i);
if (nv50->constbuf[s][i].user)
continue;
@@ -139,22 +135,20 @@ nv50_context_unreference_resources(struct nv50_context *nv50)
nouveau_bufctx_del(&nv50->bufctx_3d);
nouveau_bufctx_del(&nv50->bufctx);
nouveau_bufctx_del(&nv50->bufctx_cp);
util_unreference_framebuffer_state(&nv50->framebuffer);
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i)
pipe_resource_reference(&nv50->vtxbuf[i].buffer.resource, NULL);
- pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
-
for (s = 0; s < 3; ++s) {
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_textures[s]; ++i)
pipe_sampler_view_reference(&nv50->textures[s][i], NULL);
for (i = 0; i < NV50_MAX_PIPE_CONSTBUFS; ++i)
if (!nv50->constbuf[s][i].user)
pipe_resource_reference(&nv50->constbuf[s][i].u.buf, NULL);
}
@@ -231,28 +225,20 @@ nv50_invalidate_resource_storage(struct nouveau_context *ctx,
assert(nv50->num_vtxbufs <= PIPE_MAX_ATTRIBS);
for (i = 0; i < nv50->num_vtxbufs; ++i) {
if (nv50->vtxbuf[i].buffer.resource == res) {
nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_VERTEX);
if (!--ref)
return ref;
}
}
- if (nv50->idxbuf.buffer == res) {
- /* Just rebind to the bufctx as there is no separate dirty bit */
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
- BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(res), RD);
- if (!--ref)
- return ref;
- }
-
for (s = 0; s < 3; ++s) {
assert(nv50->num_textures[s] <= PIPE_MAX_SAMPLERS);
for (i = 0; i < nv50->num_textures[s]; ++i) {
if (nv50->textures[s][i] &&
nv50->textures[s][i]->texture == res) {
nv50->dirty_3d |= NV50_NEW_3D_TEXTURES;
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_TEXTURES);
if (!--ref)
return ref;
}
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_context.h b/src/gallium/drivers/nouveau/nv50/nv50_context.h
index cca44f5..224535a 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_context.h
+++ b/src/gallium/drivers/nouveau/nv50/nv50_context.h
@@ -136,21 +136,20 @@ struct nv50_context {
struct nv50_program *compprog;
struct nv50_constbuf constbuf[3][NV50_MAX_PIPE_CONSTBUFS];
uint16_t constbuf_dirty[3];
uint16_t constbuf_valid[3];
uint16_t constbuf_coherent[3];
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
uint32_t vtxbufs_coherent;
- struct pipe_index_buffer idxbuf;
uint32_t vbo_fifo; /* bitmask of vertex elements to be pushed to FIFO */
uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */
uint32_t vbo_constant; /* bitmask of user buffers with stride 0 */
uint32_t vb_elt_first; /* from pipe_draw_info, for vertex upload */
uint32_t vb_elt_limit; /* max - min element (count - 1) */
uint32_t instance_off; /* base vertex for instanced arrays */
uint32_t instance_max; /* max instance for current draw call */
struct pipe_sampler_view *textures[3][PIPE_MAX_SAMPLERS];
unsigned num_textures[3];
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_push.c b/src/gallium/drivers/nouveau/nv50/nv50_push.c
index d341901..9ee9a8e 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_push.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_push.c
@@ -237,21 +237,21 @@ nv50_prim_gl(unsigned prim)
}
}
void
nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
{
struct push_context ctx;
unsigned i, index_size;
unsigned inst_count = info->instance_count;
unsigned vert_count = info->count;
- bool apply_bias = info->indexed && info->index_bias;
+ bool apply_bias = info->index_size && info->index_bias;
ctx.push = nv50->base.pushbuf;
ctx.translate = nv50->vertex->translate;
ctx.need_vertex_id = nv50->screen->base.class_3d >= NV84_3D_CLASS &&
nv50->vertprog->vp.need_vertex_id && (nv50->vertex->num_elements < 32);
ctx.index_bias = info->index_bias;
ctx.instance_id = 0;
/* For indexed draws, gl_VertexID must be emitted for every vertex. */
@@ -269,31 +269,31 @@ nv50_push_vbo(struct nv50_context *nv50, const struct pipe_draw_info *info)
nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
else
data = vb->buffer.user;
if (apply_bias && likely(!(nv50->vertex->instance_bufs & (1 << i))))
data += (ptrdiff_t)info->index_bias * vb->stride;
ctx.translate->set_buffer(ctx.translate, i, data, vb->stride, ~0);
}
- if (info->indexed) {
- if (nv50->idxbuf.buffer) {
+ if (info->index_size) {
+ if (!info->has_user_indices) {
ctx.idxbuf = nouveau_resource_map_offset(&nv50->base,
- nv04_resource(nv50->idxbuf.buffer), nv50->idxbuf.offset,
+ nv04_resource(info->index.resource), info->start * info->index_size,
NOUVEAU_BO_RD);
} else {
- ctx.idxbuf = nv50->idxbuf.user_buffer;
+ ctx.idxbuf = info->index.user;
}
if (!ctx.idxbuf)
return;
- index_size = nv50->idxbuf.index_size;
+ index_size = info->index_size;
ctx.primitive_restart = info->primitive_restart;
ctx.restart_index = info->restart_index;
} else {
if (unlikely(info->count_from_stream_output)) {
struct pipe_context *pipe = &nv50->base.pipe;
struct nv50_so_target *targ;
targ = nv50_so_target(info->count_from_stream_output);
if (!targ->pq) {
NOUVEAU_ERR("draw_stream_output not supported on pre-NVA0 cards\n");
return;
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_state.c b/src/gallium/drivers/nouveau/nv50/nv50_state.c
index d5af6c9..a7d86b0 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_state.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_state.c
@@ -1074,43 +1074,20 @@ nv50_set_vertex_buffers(struct pipe_context *pipe,
if (vb[i].buffer.resource &&
vb[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nv50->vtxbufs_coherent |= (1 << dst_index);
else
nv50->vtxbufs_coherent &= ~(1 << dst_index);
}
}
}
static void
-nv50_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct nv50_context *nv50 = nv50_context(pipe);
-
- if (nv50->idxbuf.buffer)
- nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
-
- if (ib) {
- pipe_resource_reference(&nv50->idxbuf.buffer, ib->buffer);
- nv50->idxbuf.index_size = ib->index_size;
- if (ib->buffer) {
- nv50->idxbuf.offset = ib->offset;
- BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(ib->buffer), RD);
- } else {
- nv50->idxbuf.user_buffer = ib->user_buffer;
- }
- } else {
- pipe_resource_reference(&nv50->idxbuf.buffer, NULL);
- }
-}
-
-static void
nv50_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
struct nv50_context *nv50 = nv50_context(pipe);
nv50->vertex = hwcso;
nv50->dirty_3d |= NV50_NEW_3D_VERTEX;
}
static struct pipe_stream_output_target *
nv50_so_target_create(struct pipe_context *pipe,
@@ -1334,21 +1311,20 @@ nv50_init_state_functions(struct nv50_context *nv50)
pipe->set_polygon_stipple = nv50_set_polygon_stipple;
pipe->set_scissor_states = nv50_set_scissor_states;
pipe->set_viewport_states = nv50_set_viewport_states;
pipe->set_window_rectangles = nv50_set_window_rectangles;
pipe->create_vertex_elements_state = nv50_vertex_state_create;
pipe->delete_vertex_elements_state = nv50_vertex_state_delete;
pipe->bind_vertex_elements_state = nv50_vertex_state_bind;
pipe->set_vertex_buffers = nv50_set_vertex_buffers;
- pipe->set_index_buffer = nv50_set_index_buffer;
pipe->create_stream_output_target = nv50_so_target_create;
pipe->stream_output_target_destroy = nv50_so_target_destroy;
pipe->set_stream_output_targets = nv50_set_stream_output_targets;
pipe->set_global_binding = nv50_set_global_bindings;
pipe->set_compute_resources = nv50_set_compute_resources;
nv50->sample_mask = ~0;
nv50->min_samples = 1;
diff --git a/src/gallium/drivers/nouveau/nv50/nv50_vbo.c b/src/gallium/drivers/nouveau/nv50/nv50_vbo.c
index 60970d7..37dca97 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_vbo.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_vbo.c
@@ -588,48 +588,49 @@ nv50_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
for (i = 0; i < nr; ++i) {
PUSH_DATA(push, (map[1] << 16) | map[0]);
map += 2;
}
count -= nr * 2;
}
}
static void
nv50_draw_elements(struct nv50_context *nv50, bool shorten,
+ const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count,
- unsigned instance_count, int32_t index_bias)
+ unsigned instance_count, int32_t index_bias,
+ unsigned index_size)
{
struct nouveau_pushbuf *push = nv50->base.pushbuf;
unsigned prim;
- const unsigned index_size = nv50->idxbuf.index_size;
prim = nv50_prim_gl(mode);
if (index_bias != nv50->state.index_bias) {
BEGIN_NV04(push, NV50_3D(VB_ELEMENT_BASE), 1);
PUSH_DATA (push, index_bias);
if (nv50->screen->base.class_3d >= NV84_3D_CLASS) {
BEGIN_NV04(push, NV84_3D(VERTEX_ID_BASE), 1);
PUSH_DATA (push, index_bias);
}
nv50->state.index_bias = index_bias;
}
- if (nv50->idxbuf.buffer) {
- struct nv04_resource *buf = nv04_resource(nv50->idxbuf.buffer);
+ if (!info->has_user_indices) {
+ struct nv04_resource *buf = nv04_resource(info->index.resource);
unsigned pb_start;
unsigned pb_bytes;
- const unsigned base = (buf->offset + nv50->idxbuf.offset) & ~3;
+ const unsigned base = buf->offset & ~3;
- start += ((buf->offset + nv50->idxbuf.offset) & 3) >> (index_size >> 1);
+ start += (buf->offset & 3) >> (index_size >> 1);
- assert(nouveau_resource_mapped_by_gpu(nv50->idxbuf.buffer));
+ assert(nouveau_resource_mapped_by_gpu(info->index.resource));
/* This shouldn't have to be here. The going theory is that the buffer
* is being filled in by PGRAPH, and it's not done yet by the time it
* gets submitted to PFIFO, which in turn starts immediately prefetching
* the not-yet-written data. Ideally this wait would only happen on
* pushbuf submit, but it's probably not a big performance difference.
*/
if (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr))
nouveau_fence_wait(buf->fence_wr, &nv50->base.debug);
@@ -668,21 +669,21 @@ nv50_draw_elements(struct nv50_context *nv50, bool shorten,
BEGIN_NV04(push, NV50_3D(VB_ELEMENT_U8_SETUP), 1);
PUSH_DATA (push, 0);
break;
}
BEGIN_NV04(push, NV50_3D(VERTEX_END_GL), 1);
PUSH_DATA (push, 0);
prim |= NV50_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT;
}
} else {
- const void *data = nv50->idxbuf.user_buffer;
+ const void *data = info->index.user;
while (instance_count--) {
BEGIN_NV04(push, NV50_3D(VERTEX_BEGIN_GL), 1);
PUSH_DATA (push, prim);
switch (index_size) {
case 1:
nv50_draw_elements_inline_u08(push, data, start, count);
break;
case 2:
nv50_draw_elements_inline_u16(push, data, start, count);
@@ -762,31 +763,36 @@ nv50_draw_vbo_kick_notify(struct nouveau_pushbuf *chan)
}
void
nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
struct nv50_context *nv50 = nv50_context(pipe);
struct nouveau_pushbuf *push = nv50->base.pushbuf;
bool tex_dirty = false;
int s;
+ if (info->index_size && !info->has_user_indices) {
+ nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_INDEX);
+ BCTX_REFN(nv50->bufctx_3d, 3D_INDEX, nv04_resource(info->index.resource), RD);
+ }
+
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nv50->vb_elt_first = info->min_index + info->index_bias;
nv50->vb_elt_limit = info->max_index - info->min_index;
nv50->instance_off = info->start_instance;
nv50->instance_max = info->instance_count - 1;
/* For picking only a few vertices from a large user buffer, push is better,
* if index count is larger and we expect repeated vertices, suggest upload.
*/
nv50->vbo_push_hint = /* the 64 is heuristic */
- !(info->indexed && ((nv50->vb_elt_limit + 64) < info->count));
+ !(info->index_size && ((nv50->vb_elt_limit + 64) < info->count));
if (nv50->vbo_user && !(nv50->dirty_3d & (NV50_NEW_3D_ARRAYS | NV50_NEW_3D_VERTEX))) {
if (!!nv50->vbo_fifo != nv50->vbo_push_hint)
nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
else
if (!nv50->vbo_fifo)
nv50_update_user_vbufs(nv50);
}
if (unlikely(nv50->num_so_targets && !nv50->gmtyprog))
@@ -846,21 +852,21 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
}
nv50->base.vbo_dirty |= !!nv50->vtxbufs_coherent;
if (nv50->base.vbo_dirty) {
BEGIN_NV04(push, NV50_3D(VERTEX_ARRAY_FLUSH), 1);
PUSH_DATA (push, 0);
nv50->base.vbo_dirty = false;
}
- if (info->indexed) {
+ if (info->index_size) {
bool shorten = info->max_index <= 65535;
if (info->primitive_restart != nv50->state.prim_restart) {
if (info->primitive_restart) {
BEGIN_NV04(push, NV50_3D(PRIM_RESTART_ENABLE), 2);
PUSH_DATA (push, 1);
PUSH_DATA (push, info->restart_index);
if (info->restart_index > 65535)
shorten = false;
@@ -871,23 +877,23 @@ nv50_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
nv50->state.prim_restart = info->primitive_restart;
} else
if (info->primitive_restart) {
BEGIN_NV04(push, NV50_3D(PRIM_RESTART_INDEX), 1);
PUSH_DATA (push, info->restart_index);
if (info->restart_index > 65535)
shorten = false;
}
- nv50_draw_elements(nv50, shorten,
+ nv50_draw_elements(nv50, shorten, info,
info->mode, info->start, info->count,
- info->instance_count, info->index_bias);
+ info->instance_count, info->index_bias, info->index_size);
} else
if (unlikely(info->count_from_stream_output)) {
nva0_draw_stream_output(nv50, info);
} else {
nv50_draw_arrays(nv50,
info->mode, info->start, info->count,
info->instance_count);
}
push->kick_notify = nv50_default_kick_notify;
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_context.c b/src/gallium/drivers/nouveau/nvc0/nvc0_context.c
index ef61256..59edd3d 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_context.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_context.c
@@ -61,24 +61,20 @@ nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
int i, s;
if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
if (!nvc0->vtxbuf[i].buffer.resource && !nvc0->vtxbuf[i].is_user_buffer)
continue;
if (nvc0->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
nvc0->base.vbo_dirty = true;
}
- if (nvc0->idxbuf.buffer &&
- nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
- nvc0->base.vbo_dirty = true;
-
for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
uint32_t valid = nvc0->constbuf_valid[s];
while (valid && !nvc0->cb_dirty) {
const unsigned i = ffs(valid) - 1;
struct pipe_resource *res;
valid &= ~(1 << i);
if (nvc0->constbuf[s][i].user)
continue;
@@ -142,22 +138,20 @@ nvc0_context_unreference_resources(struct nvc0_context *nvc0)
nouveau_bufctx_del(&nvc0->bufctx_3d);
nouveau_bufctx_del(&nvc0->bufctx);
nouveau_bufctx_del(&nvc0->bufctx_cp);
util_unreference_framebuffer_state(&nvc0->framebuffer);
for (i = 0; i < nvc0->num_vtxbufs; ++i)
pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);
- pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
-
for (s = 0; s < 6; ++s) {
for (i = 0; i < nvc0->num_textures[s]; ++i)
pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);
for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i)
if (!nvc0->constbuf[s][i].user)
pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, NULL);
for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);
@@ -261,27 +255,20 @@ nvc0_invalidate_resource_storage(struct nouveau_context *ctx,
if (res->target == PIPE_BUFFER) {
for (i = 0; i < nvc0->num_vtxbufs; ++i) {
if (nvc0->vtxbuf[i].buffer.resource == res) {
nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);
if (!--ref)
return ref;
}
}
- if (nvc0->idxbuf.buffer == res) {
- nvc0->dirty_3d |= NVC0_NEW_3D_IDXBUF;
- nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
- if (!--ref)
- return ref;
- }
-
for (s = 0; s < 6; ++s) {
for (i = 0; i < nvc0->num_textures[s]; ++i) {
if (nvc0->textures[s][i] &&
nvc0->textures[s][i]->texture == res) {
nvc0->textures_dirty[s] |= 1 << i;
if (unlikely(s == 5)) {
nvc0->dirty_cp |= NVC0_NEW_CP_TEXTURES;
nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_TEX(i));
} else {
nvc0->dirty_3d |= NVC0_NEW_3D_TEXTURES;
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_context.h b/src/gallium/drivers/nouveau/nvc0/nvc0_context.h
index bd6f752..6f631b9 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_context.h
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_context.h
@@ -46,21 +46,21 @@
#define NVC0_NEW_3D_FRAMEBUFFER (1 << 12)
#define NVC0_NEW_3D_STIPPLE (1 << 13)
#define NVC0_NEW_3D_SCISSOR (1 << 14)
#define NVC0_NEW_3D_VIEWPORT (1 << 15)
#define NVC0_NEW_3D_ARRAYS (1 << 16)
#define NVC0_NEW_3D_VERTEX (1 << 17)
#define NVC0_NEW_3D_CONSTBUF (1 << 18)
#define NVC0_NEW_3D_TEXTURES (1 << 19)
#define NVC0_NEW_3D_SAMPLERS (1 << 20)
#define NVC0_NEW_3D_TFB_TARGETS (1 << 21)
-#define NVC0_NEW_3D_IDXBUF (1 << 22)
+
#define NVC0_NEW_3D_SURFACES (1 << 23)
#define NVC0_NEW_3D_MIN_SAMPLES (1 << 24)
#define NVC0_NEW_3D_TESSFACTOR (1 << 25)
#define NVC0_NEW_3D_BUFFERS (1 << 26)
#define NVC0_NEW_3D_DRIVERCONST (1 << 27)
#define NVC0_NEW_3D_WINDOW_RECTS (1 << 28)
#define NVC0_NEW_CP_PROGRAM (1 << 0)
#define NVC0_NEW_CP_SURFACES (1 << 1)
#define NVC0_NEW_CP_TEXTURES (1 << 2)
@@ -186,21 +186,20 @@ struct nvc0_context {
struct nvc0_constbuf constbuf[6][NVC0_MAX_PIPE_CONSTBUFS];
uint16_t constbuf_dirty[6];
uint16_t constbuf_valid[6];
uint16_t constbuf_coherent[6];
bool cb_dirty;
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
unsigned num_vtxbufs;
uint32_t vtxbufs_coherent;
- struct pipe_index_buffer idxbuf;
uint32_t constant_vbos;
uint32_t vbo_user; /* bitmask of vertex buffers pointing to user memory */
uint32_t vb_elt_first; /* from pipe_draw_info, for vertex upload */
uint32_t vb_elt_limit; /* max - min element (count - 1) */
uint32_t instance_off; /* current base vertex for instanced arrays */
uint32_t instance_max; /* last instance for current draw call */
struct pipe_sampler_view *textures[6][PIPE_MAX_SAMPLERS];
unsigned num_textures[6];
uint32_t textures_dirty[6];
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_state.c b/src/gallium/drivers/nouveau/nvc0/nvc0_state.c
index bf33746..99d45a2 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_state.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_state.c
@@ -955,45 +955,20 @@ nvc0_set_vertex_buffers(struct pipe_context *pipe,
if (vb[i].buffer.resource &&
vb[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nvc0->vtxbufs_coherent |= (1 << dst_index);
else
nvc0->vtxbufs_coherent &= ~(1 << dst_index);
}
}
}
static void
-nvc0_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct nvc0_context *nvc0 = nvc0_context(pipe);
-
- if (nvc0->idxbuf.buffer)
- nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
-
- if (ib) {
- pipe_resource_reference(&nvc0->idxbuf.buffer, ib->buffer);
- nvc0->idxbuf.index_size = ib->index_size;
- if (ib->buffer) {
- nvc0->idxbuf.offset = ib->offset;
- nvc0->dirty_3d |= NVC0_NEW_3D_IDXBUF;
- } else {
- nvc0->idxbuf.user_buffer = ib->user_buffer;
- nvc0->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
- }
- } else {
- nvc0->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
- pipe_resource_reference(&nvc0->idxbuf.buffer, NULL);
- }
-}
-
-static void
nvc0_vertex_state_bind(struct pipe_context *pipe, void *hwcso)
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
nvc0->vertex = hwcso;
nvc0->dirty_3d |= NVC0_NEW_3D_VERTEX;
}
static struct pipe_stream_output_target *
nvc0_so_target_create(struct pipe_context *pipe,
@@ -1419,21 +1394,20 @@ nvc0_init_state_functions(struct nvc0_context *nvc0)
pipe->set_scissor_states = nvc0_set_scissor_states;
pipe->set_viewport_states = nvc0_set_viewport_states;
pipe->set_window_rectangles = nvc0_set_window_rectangles;
pipe->set_tess_state = nvc0_set_tess_state;
pipe->create_vertex_elements_state = nvc0_vertex_state_create;
pipe->delete_vertex_elements_state = nvc0_vertex_state_delete;
pipe->bind_vertex_elements_state = nvc0_vertex_state_bind;
pipe->set_vertex_buffers = nvc0_set_vertex_buffers;
- pipe->set_index_buffer = nvc0_set_index_buffer;
pipe->create_stream_output_target = nvc0_so_target_create;
pipe->stream_output_target_destroy = nvc0_so_target_destroy;
pipe->set_stream_output_targets = nvc0_set_transform_feedback_targets;
pipe->set_global_binding = nvc0_set_global_bindings;
pipe->set_compute_resources = nvc0_set_compute_resources;
pipe->set_shader_images = nvc0_set_shader_images;
pipe->set_shader_buffers = nvc0_set_shader_buffers;
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_state_validate.c b/src/gallium/drivers/nouveau/nvc0/nvc0_state_validate.c
index 6d3caa1..37a6761 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_state_validate.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_state_validate.c
@@ -812,22 +812,20 @@ nvc0_switch_pipe_context(struct nvc0_context *ctx_to)
ctx_to->constbuf_dirty[s] = (1 << NVC0_MAX_PIPE_CONSTBUFS) - 1;
ctx_to->buffers_dirty[s] = ~0;
ctx_to->images_dirty[s] = ~0;
}
/* Reset tfb as the shader that owns it may have been deleted. */
ctx_to->state.tfb = NULL;
if (!ctx_to->vertex)
ctx_to->dirty_3d &= ~(NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS);
- if (!ctx_to->idxbuf.buffer)
- ctx_to->dirty_3d &= ~NVC0_NEW_3D_IDXBUF;
if (!ctx_to->vertprog)
ctx_to->dirty_3d &= ~NVC0_NEW_3D_VERTPROG;
if (!ctx_to->fragprog)
ctx_to->dirty_3d &= ~NVC0_NEW_3D_FRAGPROG;
if (!ctx_to->blend)
ctx_to->dirty_3d &= ~NVC0_NEW_3D_BLEND;
if (!ctx_to->rast)
ctx_to->dirty_3d &= ~(NVC0_NEW_3D_RASTERIZER | NVC0_NEW_3D_SCISSOR);
@@ -869,21 +867,20 @@ validate_list_3d[] = {
NVC0_NEW_3D_GMTYPROG },
{ nvc0_constbufs_validate, NVC0_NEW_3D_CONSTBUF },
{ nvc0_validate_textures, NVC0_NEW_3D_TEXTURES },
{ nvc0_validate_samplers, NVC0_NEW_3D_SAMPLERS },
{ nve4_set_tex_handles, NVC0_NEW_3D_TEXTURES | NVC0_NEW_3D_SAMPLERS },
{ nvc0_validate_fbread, NVC0_NEW_3D_FRAGPROG |
NVC0_NEW_3D_FRAMEBUFFER },
{ nvc0_vertex_arrays_validate, NVC0_NEW_3D_VERTEX | NVC0_NEW_3D_ARRAYS },
{ nvc0_validate_surfaces, NVC0_NEW_3D_SURFACES },
{ nvc0_validate_buffers, NVC0_NEW_3D_BUFFERS },
- { nvc0_idxbuf_validate, NVC0_NEW_3D_IDXBUF },
{ nvc0_tfb_validate, NVC0_NEW_3D_TFB_TARGETS | NVC0_NEW_3D_GMTYPROG },
{ nvc0_layer_validate, NVC0_NEW_3D_VERTPROG |
NVC0_NEW_3D_TEVLPROG |
NVC0_NEW_3D_GMTYPROG },
{ nvc0_validate_driverconst, NVC0_NEW_3D_DRIVERCONST },
};
bool
nvc0_state_validate(struct nvc0_context *nvc0, uint32_t mask,
struct nvc0_state_validate *validate_list, int size,
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_vbo.c b/src/gallium/drivers/nouveau/nvc0/nvc0_vbo.c
index 7cea5fb..86c5e8b 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_vbo.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_vbo.c
@@ -515,40 +515,20 @@ nvc0_vertex_arrays_validate(struct nvc0_context *nvc0)
}
if (nvc0->state.vbo_mode) /* using translate, don't set up arrays here */
return;
if (vertex->shared_slots)
nvc0_validate_vertex_buffers_shared(nvc0);
else
nvc0_validate_vertex_buffers(nvc0);
}
-void
-nvc0_idxbuf_validate(struct nvc0_context *nvc0)
-{
- struct nouveau_pushbuf *push = nvc0->base.pushbuf;
- struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
-
- assert(buf);
- assert(nouveau_resource_mapped_by_gpu(&buf->base));
-
- PUSH_SPACE(push, 6);
- BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
- PUSH_DATAh(push, buf->address + nvc0->idxbuf.offset);
- PUSH_DATA (push, buf->address + nvc0->idxbuf.offset);
- PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
- PUSH_DATA (push, buf->address + buf->base.width0 - 1);
- PUSH_DATA (push, nvc0->idxbuf.index_size >> 1);
-
- BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
-}
-
#define NVC0_PRIM_GL_CASE(n) \
case PIPE_PRIM_##n: return NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_##n
static inline unsigned
nvc0_prim_gl(unsigned prim)
{
switch (prim) {
NVC0_PRIM_GL_CASE(POINTS);
NVC0_PRIM_GL_CASE(LINES);
NVC0_PRIM_GL_CASE(LINE_LOOP);
@@ -581,21 +561,21 @@ nvc0_draw_vbo_kick_notify(struct nouveau_pushbuf *push)
static void
nvc0_draw_arrays(struct nvc0_context *nvc0,
unsigned mode, unsigned start, unsigned count,
unsigned instance_count)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
unsigned prim;
if (nvc0->state.index_bias) {
- /* index_bias is implied 0 if !info->indexed (really ?) */
+ /* index_bias is implied 0 if !info->index_size (really ?) */
/* TODO: can we deactivate it for the VERTEX_BUFFER_FIRST command ? */
PUSH_SPACE(push, 2);
IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
nvc0->state.index_bias = 0;
}
prim = nvc0_prim_gl(mode);
while (instance_count--) {
@@ -704,55 +684,56 @@ nvc0_draw_elements_inline_u32_short(struct nouveau_pushbuf *push,
for (i = 0; i < nr; ++i) {
PUSH_DATA(push, (map[1] << 16) | map[0]);
map += 2;
}
count -= nr * 2;
}
}
static void
nvc0_draw_elements(struct nvc0_context *nvc0, bool shorten,
+ const struct pipe_draw_info *info,
unsigned mode, unsigned start, unsigned count,
- unsigned instance_count, int32_t index_bias)
+ unsigned instance_count, int32_t index_bias,
+ unsigned index_size)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
unsigned prim;
- const unsigned index_size = nvc0->idxbuf.index_size;
prim = nvc0_prim_gl(mode);
if (index_bias != nvc0->state.index_bias) {
PUSH_SPACE(push, 4);
BEGIN_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 1);
PUSH_DATA (push, index_bias);
BEGIN_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 1);
PUSH_DATA (push, index_bias);
nvc0->state.index_bias = index_bias;
}
- if (nvc0->idxbuf.buffer) {
+ if (!info->has_user_indices) {
PUSH_SPACE(push, 1);
IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), prim);
do {
PUSH_SPACE(push, 7);
BEGIN_NVC0(push, NVC0_3D(INDEX_BATCH_FIRST), 2);
PUSH_DATA (push, start);
PUSH_DATA (push, count);
if (--instance_count) {
BEGIN_NVC0(push, NVC0_3D(VERTEX_END_GL), 2);
PUSH_DATA (push, 0);
PUSH_DATA (push, prim | NVC0_3D_VERTEX_BEGIN_GL_INSTANCE_NEXT);
}
} while (instance_count);
IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
} else {
- const void *data = nvc0->idxbuf.user_buffer;
+ const void *data = info->index.user;
while (instance_count--) {
PUSH_SPACE(push, 2);
BEGIN_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL), 1);
PUSH_DATA (push, prim);
switch (index_size) {
case 1:
nvc0_draw_elements_inline_u08(push, data, start, count);
break;
case 2:
@@ -834,31 +815,31 @@ nvc0_draw_indirect(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
}
/* Queue things up to let the macros write params to the driver constbuf */
BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
PUSH_DATA (push, NVC0_CB_AUX_SIZE);
PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
BEGIN_NVC0(push, NVC0_3D(CB_POS), 1);
PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
- if (info->indexed) {
- assert(nvc0->idxbuf.buffer);
- assert(nouveau_resource_mapped_by_gpu(nvc0->idxbuf.buffer));
+ if (info->index_size) {
+ assert(!info->has_user_indices);
+ assert(nouveau_resource_mapped_by_gpu(info->index.resource));
size = 5;
if (buf_count)
macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT_COUNT;
else
macro = NVC0_3D_MACRO_DRAW_ELEMENTS_INDIRECT;
} else {
if (nvc0->state.index_bias) {
- /* index_bias is implied 0 if !info->indexed (really ?) */
+ /* index_bias is implied 0 if !info->index_size (really ?) */
IMMED_NVC0(push, NVC0_3D(VB_ELEMENT_BASE), 0);
IMMED_NVC0(push, NVC0_3D(VERTEX_ID_BASE), 0);
nvc0->state.index_bias = 0;
}
size = 4;
if (buf_count)
macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT_COUNT;
else
macro = NVC0_3D_MACRO_DRAW_ARRAYS_INDIRECT;
}
@@ -933,31 +914,34 @@ nvc0_update_prim_restart(struct nvc0_context *nvc0, bool en, uint32_t index)
}
void
nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nvc0_screen *screen = nvc0->screen;
int s;
+ if (info->index_size)
+ nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_IDX);
+
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
nvc0->vb_elt_first = info->min_index + info->index_bias;
nvc0->vb_elt_limit = info->max_index - info->min_index;
nvc0->instance_off = info->start_instance;
nvc0->instance_max = info->instance_count - 1;
/* For picking only a few vertices from a large user buffer, push is better,
* if index count is larger and we expect repeated vertices, suggest upload.
*/
nvc0->vbo_push_hint =
- !info->indirect && info->indexed &&
+ !info->indirect && info->index_size &&
(nvc0->vb_elt_limit >= (info->count * 2));
/* Check whether we want to switch vertex-submission mode. */
if (nvc0->vbo_user && !(nvc0->dirty_3d & (NVC0_NEW_3D_ARRAYS | NVC0_NEW_3D_VERTEX))) {
if (nvc0->vbo_push_hint != !!nvc0->state.vbo_mode)
if (nvc0->state.vbo_mode != 3)
nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
if (!(nvc0->dirty_3d & NVC0_NEW_3D_ARRAYS) && nvc0->state.vbo_mode == 0) {
if (nvc0->vertex->shared_slots)
@@ -967,20 +951,37 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
}
}
if (info->mode == PIPE_PRIM_PATCHES &&
nvc0->state.patch_vertices != info->vertices_per_patch) {
nvc0->state.patch_vertices = info->vertices_per_patch;
PUSH_SPACE(push, 1);
IMMED_NVC0(push, NVC0_3D(PATCH_VERTICES), nvc0->state.patch_vertices);
}
+ if (info->index_size && !info->has_user_indices) {
+ struct nv04_resource *buf = nv04_resource(info->index.resource);
+
+ assert(buf);
+ assert(nouveau_resource_mapped_by_gpu(&buf->base));
+
+ PUSH_SPACE(push, 6);
+ BEGIN_NVC0(push, NVC0_3D(INDEX_ARRAY_START_HIGH), 5);
+ PUSH_DATAh(push, buf->address);
+ PUSH_DATA (push, buf->address);
+ PUSH_DATAh(push, buf->address + buf->base.width0 - 1);
+ PUSH_DATA (push, buf->address + buf->base.width0 - 1);
+ PUSH_DATA (push, info->index_size >> 1);
+
+ BCTX_REFN(nvc0->bufctx_3d, 3D_IDX, buf, RD);
+ }
+
nvc0_state_validate_3d(nvc0, ~0);
if (nvc0->vertprog->vp.need_draw_parameters && !info->indirect) {
PUSH_SPACE(push, 9);
BEGIN_NVC0(push, NVC0_3D(CB_SIZE), 3);
PUSH_DATA (push, NVC0_CB_AUX_SIZE);
PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(0));
BEGIN_1IC0(push, NVC0_3D(CB_POS), 1 + 3);
PUSH_DATA (push, NVC0_CB_AUX_DRAW_INFO);
@@ -1039,47 +1040,47 @@ nvc0_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
if (nvc0->state.instance_base != info->start_instance) {
nvc0->state.instance_base = info->start_instance;
/* NOTE: this does not affect the shader input, should it ? */
BEGIN_NVC0(push, NVC0_3D(VB_INSTANCE_BASE), 1);
PUSH_DATA (push, info->start_instance);
}
nvc0->base.vbo_dirty |= !!nvc0->vtxbufs_coherent;
- if (!nvc0->base.vbo_dirty && nvc0->idxbuf.buffer &&
- nvc0->idxbuf.buffer->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ if (!nvc0->base.vbo_dirty && info->index_size && !info->has_user_indices &&
+ info->index.resource->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT)
nvc0->base.vbo_dirty = true;
nvc0_update_prim_restart(nvc0, info->primitive_restart, info->restart_index);
if (nvc0->base.vbo_dirty) {
if (nvc0->screen->eng3d->oclass < GM107_3D_CLASS)
IMMED_NVC0(push, NVC0_3D(VERTEX_ARRAY_FLUSH), 0);
nvc0->base.vbo_dirty = false;
}
if (unlikely(info->indirect)) {
nvc0_draw_indirect(nvc0, info);
} else
if (unlikely(info->count_from_stream_output)) {
nvc0_draw_stream_output(nvc0, info);
} else
- if (info->indexed) {
+ if (info->index_size) {
bool shorten = info->max_index <= 65535;
if (info->primitive_restart && info->restart_index > 65535)
shorten = false;
- nvc0_draw_elements(nvc0, shorten,
+ nvc0_draw_elements(nvc0, shorten, info,
info->mode, info->start, info->count,
- info->instance_count, info->index_bias);
+ info->instance_count, info->index_bias, info->index_size);
} else {
nvc0_draw_arrays(nvc0,
info->mode, info->start, info->count,
info->instance_count);
}
push->kick_notify = nvc0_default_kick_notify;
nvc0_release_user_vbufs(nvc0);
nouveau_pushbuf_bufctx(push, NULL);
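With NVC0_NEW_3D_IDXBUF and nvc0_idxbuf_validate removed, the index range is programmed per draw in nvc0_draw_vbo straight from pipe_draw_info. On the caller side an indexed draw is now described roughly as below; this is a hedged sketch rather than code from this patch, and index_buf, first_index and num_indices are placeholder names:

/* Illustrative only: describing an indexed draw without set_index_buffer. */
struct pipe_draw_info info;
memset(&info, 0, sizeof(info));
info.mode = PIPE_PRIM_TRIANGLES;
info.start = first_index;            /* replaces pipe_index_buffer::offset */
info.count = num_indices;
info.instance_count = 1;
info.index_size = 2;                 /* 0 would make this a non-indexed draw */
info.has_user_indices = false;
info.index.resource = index_buf;     /* or index.user when has_user_indices is set */
info.min_index = 0;
info.max_index = ~0u;
pipe->draw_vbo(pipe, &info);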
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_vbo_translate.c b/src/gallium/drivers/nouveau/nvc0/nvc0_vbo_translate.c
index e4ccac8..f05618f 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_vbo_translate.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_vbo_translate.c
@@ -76,28 +76,30 @@ nvc0_vertex_configure_translate(struct nvc0_context *nvc0, int32_t index_bias)
nv04_resource(vb->buffer.resource), vb->buffer_offset, NOUVEAU_BO_RD);
if (index_bias && !unlikely(nvc0->vertex->instance_bufs & (1 << i)))
map += (intptr_t)index_bias * vb->stride;
translate->set_buffer(translate, i, map, vb->stride, ~0);
}
}
static inline void
-nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0)
+nvc0_push_map_idxbuf(struct push_context *ctx, struct nvc0_context *nvc0,
+ const struct pipe_draw_info *info,
+ unsigned offset)
{
- if (nvc0->idxbuf.buffer) {
- struct nv04_resource *buf = nv04_resource(nvc0->idxbuf.buffer);
+ if (!info->has_user_indices) {
+ struct nv04_resource *buf = nv04_resource(info->index.resource);
ctx->idxbuf = nouveau_resource_map_offset(&nvc0->base,
- buf, nvc0->idxbuf.offset, NOUVEAU_BO_RD);
+ buf, offset, NOUVEAU_BO_RD);
} else {
- ctx->idxbuf = nvc0->idxbuf.user_buffer;
+ ctx->idxbuf = info->index.user;
}
}
static inline void
nvc0_push_map_edgeflag(struct push_context *ctx, struct nvc0_context *nvc0,
int32_t index_bias)
{
unsigned attr = nvc0->vertprog->vp.edgeflag;
struct pipe_vertex_element *ve = &nvc0->vertex->element[attr].pipe;
struct pipe_vertex_buffer *vb = &nvc0->vtxbuf[ve->vertex_buffer_index];
@@ -492,30 +494,30 @@ nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
if (info->primitive_restart) {
/* NOTE: I hope we won't ever need that last index (~0).
* If we do, we have to disable primitive restart here always and
* use END,BEGIN to restart. (XXX: would that affect PrimitiveID ?)
* We could also deactivate PRIM_RESTART_WITH_DRAW_ARRAYS temporarily,
* and add manual restart to disp_vertices_seq.
*/
BEGIN_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 2);
PUSH_DATA (ctx.push, 1);
- PUSH_DATA (ctx.push, info->indexed ? 0xffffffff : info->restart_index);
+ PUSH_DATA (ctx.push, info->index_size ? 0xffffffff : info->restart_index);
} else
if (nvc0->state.prim_restart) {
IMMED_NVC0(ctx.push, NVC0_3D(PRIM_RESTART_ENABLE), 0);
}
nvc0->state.prim_restart = info->primitive_restart;
- if (info->indexed) {
- nvc0_push_map_idxbuf(&ctx, nvc0);
- index_size = nvc0->idxbuf.index_size;
+ if (info->index_size) {
+ nvc0_push_map_idxbuf(&ctx, nvc0, info, info->start * info->index_size);
+ index_size = info->index_size;
} else {
if (unlikely(info->count_from_stream_output)) {
struct pipe_context *pipe = &nvc0->base.pipe;
struct nvc0_so_target *targ;
targ = nvc0_so_target(info->count_from_stream_output);
pipe->get_query_result(pipe, targ->pq, true, (void *)&vert_count);
vert_count /= targ->stride;
}
ctx.idxbuf = NULL; /* shut up warnings */
index_size = 0;
@@ -576,22 +578,22 @@ nvc0_push_vbo(struct nvc0_context *nvc0, const struct pipe_draw_info *info)
PUSH_SPACE(ctx.push, 4);
IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ID_REPLACE), 0);
BEGIN_NVC0(ctx.push, NVC0_3D(VERTEX_ATTRIB_FORMAT(1)), 1);
PUSH_DATA (ctx.push,
NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST |
NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32);
IMMED_NVC0(ctx.push, NVC0_3D(VERTEX_ARRAY_FETCH(1)), 0);
}
- if (info->indexed)
- nouveau_resource_unmap(nv04_resource(nvc0->idxbuf.buffer));
+ if (info->index_size && !info->has_user_indices)
+ nouveau_resource_unmap(nv04_resource(info->index.resource));
for (i = 0; i < nvc0->num_vtxbufs; ++i)
nouveau_resource_unmap(nv04_resource(nvc0->vtxbuf[i].buffer.resource));
NOUVEAU_DRV_STAT(&nvc0->screen->base, draw_calls_fallback_count, 1);
}
static inline void
copy_indices_u8(uint32_t *dst, const uint8_t *elts, uint32_t bias, unsigned n)
{
unsigned i;
@@ -619,38 +621,38 @@ static void
nvc0_push_upload_vertex_ids(struct push_context *ctx,
struct nvc0_context *nvc0,
const struct pipe_draw_info *info)
{
struct nouveau_pushbuf *push = ctx->push;
struct nouveau_bo *bo;
uint64_t va;
uint32_t *data;
uint32_t format;
- unsigned index_size = nvc0->idxbuf.index_size;
+ unsigned index_size = info->index_size;
unsigned i;
unsigned a = nvc0->vertex->num_elements;
if (!index_size || info->index_bias)
index_size = 4;
data = (uint32_t *)nouveau_scratch_get(&nvc0->base,
info->count * index_size, &va, &bo);
BCTX_REFN_bo(nvc0->bufctx_3d, 3D_VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD,
bo);
nouveau_pushbuf_validate(push);
- if (info->indexed) {
+ if (info->index_size) {
if (!info->index_bias) {
memcpy(data, ctx->idxbuf, info->count * index_size);
} else {
- switch (nvc0->idxbuf.index_size) {
+ switch (info->index_size) {
case 1:
copy_indices_u8(data, ctx->idxbuf, info->index_bias, info->count);
break;
case 2:
copy_indices_u16(data, ctx->idxbuf, info->index_bias, info->count);
break;
default:
copy_indices_u32(data, ctx->idxbuf, info->index_bias, info->count);
break;
}
diff --git a/src/gallium/drivers/r300/r300_context.h b/src/gallium/drivers/r300/r300_context.h
index 264ace5..ce1fab4 100644
--- a/src/gallium/drivers/r300/r300_context.h
+++ b/src/gallium/drivers/r300/r300_context.h
@@ -584,21 +584,20 @@ struct r300_context {
enum r300_fs_validity_status fs_status;
/* Framebuffer multi-write. */
boolean fb_multiwrite;
unsigned num_samples;
boolean msaa_enable;
boolean alpha_to_one;
boolean alpha_to_coverage;
void *dsa_decompress_zmask;
- struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
unsigned nr_vertex_buffers;
struct u_upload_mgr *uploader;
struct slab_child_pool pool_transfers;
/* Stat counter. */
uint64_t flush_counter;
/* const tracking for VS */
@@ -726,21 +725,21 @@ void r300_flush(struct pipe_context *pipe,
/* r300_hyperz.c */
void r300_update_hyperz_state(struct r300_context* r300);
/* r300_query.c */
void r300_resume_query(struct r300_context *r300,
struct r300_query *query);
void r300_stop_query(struct r300_context *r300);
/* r300_render_translate.c */
void r300_translate_index_buffer(struct r300_context *r300,
- struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
struct pipe_resource **out_index_buffer,
unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count);
/* r300_render_stencilref.c */
void r300_plug_in_stencil_ref_fallback(struct r300_context *r300);
/* r300_render.c */
void r500_emit_index_bias(struct r300_context *r300, int index_bias);
void r300_blitter_draw_rectangle(struct blitter_context *blitter,
diff --git a/src/gallium/drivers/r300/r300_render.c b/src/gallium/drivers/r300/r300_render.c
index 1d3e676..8eca143 100644
--- a/src/gallium/drivers/r300/r300_render.c
+++ b/src/gallium/drivers/r300/r300_render.c
@@ -494,39 +494,39 @@ static void r300_emit_draw_elements(struct r300_context *r300,
OUT_CS_RELOC(r300_resource(indexBuffer));
END_CS;
}
static void r300_draw_elements_immediate(struct r300_context *r300,
const struct pipe_draw_info *info)
{
const uint8_t *ptr1;
const uint16_t *ptr2;
const uint32_t *ptr4;
- unsigned index_size = r300->index_buffer.index_size;
+ unsigned index_size = info->index_size;
unsigned i, count_dwords = index_size == 4 ? info->count :
(info->count + 1) / 2;
CS_LOCALS(r300);
/* 19 dwords for r300_draw_elements_immediate. Give up if the function fails. */
if (!r300_prepare_for_rendering(r300,
PREP_EMIT_STATES | PREP_VALIDATE_VBOS | PREP_EMIT_VARRAYS |
PREP_INDEXED, NULL, 2+count_dwords, 0, info->index_bias, -1))
return;
r300_emit_draw_init(r300, info->mode, info->max_index);
BEGIN_CS(2 + count_dwords);
OUT_CS_PKT3(R300_PACKET3_3D_DRAW_INDX_2, count_dwords);
switch (index_size) {
case 1:
- ptr1 = (uint8_t*)r300->index_buffer.user_buffer;
+ ptr1 = (uint8_t*)info->index.user;
ptr1 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
r300_translate_primitive(info->mode));
if (info->index_bias && !r300->screen->caps.is_r500) {
for (i = 0; i < info->count-1; i += 2)
OUT_CS(((ptr1[i+1] + info->index_bias) << 16) |
(ptr1[i] + info->index_bias));
@@ -536,40 +536,40 @@ static void r300_draw_elements_immediate(struct r300_context *r300,
for (i = 0; i < info->count-1; i += 2)
OUT_CS(((ptr1[i+1]) << 16) |
(ptr1[i] ));
if (info->count & 1)
OUT_CS(ptr1[i]);
}
break;
case 2:
- ptr2 = (uint16_t*)r300->index_buffer.user_buffer;
+ ptr2 = (uint16_t*)info->index.user;
ptr2 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
r300_translate_primitive(info->mode));
if (info->index_bias && !r300->screen->caps.is_r500) {
for (i = 0; i < info->count-1; i += 2)
OUT_CS(((ptr2[i+1] + info->index_bias) << 16) |
(ptr2[i] + info->index_bias));
if (info->count & 1)
OUT_CS(ptr2[i] + info->index_bias);
} else {
OUT_CS_TABLE(ptr2, count_dwords);
}
break;
case 4:
- ptr4 = (uint32_t*)r300->index_buffer.user_buffer;
+ ptr4 = (uint32_t*)info->index.user;
ptr4 += info->start;
OUT_CS(R300_VAP_VF_CNTL__PRIM_WALK_INDICES | (info->count << 16) |
R300_VAP_VF_CNTL__INDEX_SIZE_32bit |
r300_translate_primitive(info->mode));
if (info->index_bias && !r300->screen->caps.is_r500) {
for (i = 0; i < info->count; i++)
OUT_CS(ptr4[i] + info->index_bias);
} else {
@@ -577,61 +577,62 @@ static void r300_draw_elements_immediate(struct r300_context *r300,
}
break;
}
END_CS;
}
static void r300_draw_elements(struct r300_context *r300,
const struct pipe_draw_info *info,
int instance_id)
{
- struct pipe_resource *indexBuffer = r300->index_buffer.buffer;
- unsigned indexSize = r300->index_buffer.index_size;
+ struct pipe_resource *indexBuffer =
+ info->has_user_indices ? NULL : info->index.resource;
+ unsigned indexSize = info->index_size;
struct pipe_resource* orgIndexBuffer = indexBuffer;
unsigned start = info->start;
unsigned count = info->count;
boolean alt_num_verts = r300->screen->caps.is_r500 &&
count > 65536;
unsigned short_count;
int buffer_offset = 0, index_offset = 0; /* for index bias emulation */
uint16_t indices3[3];
if (info->index_bias && !r300->screen->caps.is_r500) {
r300_split_index_bias(r300, info->index_bias, &buffer_offset,
&index_offset);
}
- r300_translate_index_buffer(r300, &r300->index_buffer, &indexBuffer,
+ r300_translate_index_buffer(r300, info, &indexBuffer,
&indexSize, index_offset, &start, count);
/* Fallback for misaligned ushort indices. */
if (indexSize == 2 && (start & 1) && indexBuffer) {
/* If we got here, then orgIndexBuffer == indexBuffer. */
uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
r300->cs,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED);
if (info->mode == PIPE_PRIM_TRIANGLES) {
memcpy(indices3, ptr + start, 6);
} else {
/* Copy the mapped index buffer directly to the upload buffer.
* The start index will be aligned simply from the fact that
* every sub-buffer in the upload buffer is aligned. */
r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
count, (uint8_t*)ptr);
}
} else {
- if (r300->index_buffer.user_buffer)
+ if (info->has_user_indices)
r300_upload_index_buffer(r300, &indexBuffer, indexSize,
&start, count,
- r300->index_buffer.user_buffer);
+ info->index.user);
}
/* 19 dwords for emit_draw_elements. Give up if the function fails. */
if (!r300_prepare_for_rendering(r300,
PREP_EMIT_STATES | PREP_VALIDATE_VBOS | PREP_EMIT_VARRAYS |
PREP_INDEXED, indexBuffer, 19, buffer_offset, info->index_bias,
instance_id))
goto done;
if (alt_num_verts || count <= 65535) {
@@ -785,40 +786,38 @@ static void r300_draw_vbo(struct pipe_context* pipe,
struct pipe_draw_info info = *dinfo;
if (r300->skip_rendering ||
!u_trim_pipe_prim(info.mode, &info.count)) {
return;
}
r300_update_derived_state(r300);
/* Draw. */
- if (info.indexed) {
+ if (info.index_size) {
unsigned max_count = r300_max_vertex_count(r300);
if (!max_count) {
fprintf(stderr, "r300: Skipping a draw command. There is a buffer "
" which is too small to be used for rendering.\n");
return;
}
if (max_count == ~0) {
/* There are no per-vertex vertex elements. Use the hardware maximum. */
max_count = 0xffffff;
}
info.max_index = max_count - 1;
- info.start += r300->index_buffer.offset / r300->index_buffer.index_size;
if (info.instance_count <= 1) {
- if (info.count <= 8 &&
- r300->index_buffer.user_buffer) {
+ if (info.count <= 8 && info.has_user_indices) {
r300_draw_elements_immediate(r300, &info);
} else {
r300_draw_elements(r300, &info, -1);
}
} else {
r300_draw_elements_instanced(r300, &info);
}
} else {
if (info.instance_count <= 1) {
if (immd_is_good_idea(r300, info.count)) {
@@ -843,20 +842,28 @@ static void r300_swtcl_draw_vbo(struct pipe_context* pipe,
{
struct r300_context* r300 = r300_context(pipe);
if (r300->skip_rendering) {
return;
}
if (!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
return;
+ if (info->index_size) {
+ draw_set_indexes(r300->draw,
+ info->has_user_indices ?
+ info->index.user :
+ r300_resource(info->index.resource)->malloced_buffer,
+ info->index_size, ~0);
+ }
+
r300_update_derived_state(r300);
draw_vbo(r300->draw, info);
draw_flush(r300->draw);
}
/* Object for rendering using Draw. */
struct r300_render {
/* Parent class */
struct vbuf_render base;
diff --git a/src/gallium/drivers/r300/r300_render_translate.c b/src/gallium/drivers/r300/r300_render_translate.c
index 7800f6e..7dc49d3 100644
--- a/src/gallium/drivers/r300/r300_render_translate.c
+++ b/src/gallium/drivers/r300/r300_render_translate.c
@@ -19,63 +19,63 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include "r300_context.h"
#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
void r300_translate_index_buffer(struct r300_context *r300,
- struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
struct pipe_resource **out_buffer,
unsigned *index_size, unsigned index_offset,
unsigned *start, unsigned count)
{
unsigned out_offset;
void *ptr;
switch (*index_size) {
case 1:
*out_buffer = NULL;
u_upload_alloc(r300->uploader, 0, count * 2, 4,
&out_offset, out_buffer, &ptr);
util_shorten_ubyte_elts_to_userptr(
- &r300->context, ib, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset,
+ &r300->context, info, PIPE_TRANSFER_UNSYNCHRONIZED, index_offset,
*start, count, ptr);
*index_size = 2;
*start = out_offset / 2;
break;
case 2:
if (index_offset) {
*out_buffer = NULL;
u_upload_alloc(r300->uploader, 0, count * 2, 4,
&out_offset, out_buffer, &ptr);
- util_rebuild_ushort_elts_to_userptr(&r300->context, ib,
+ util_rebuild_ushort_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);
*start = out_offset / 2;
}
break;
case 4:
if (index_offset) {
*out_buffer = NULL;
u_upload_alloc(r300->uploader, 0, count * 4, 4,
&out_offset, out_buffer, &ptr);
- util_rebuild_uint_elts_to_userptr(&r300->context, ib,
+ util_rebuild_uint_elts_to_userptr(&r300->context, info,
PIPE_TRANSFER_UNSYNCHRONIZED,
index_offset, *start,
count, ptr);
*start = out_offset / 4;
}
break;
}
}
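A short illustrative sketch (not part of the patch): the r300 hunks above all
check info->index_size to pick the indexed path and info->has_user_indices to
pick which member of the index union is valid. A driver-side lookup written
against that convention could look roughly like this; the helper name is made
up and the includes are only what the sketch itself needs.

#include "pipe/p_context.h"     /* struct pipe_context */
#include "pipe/p_state.h"       /* struct pipe_draw_info */
#include "util/u_inlines.h"     /* pipe_buffer_map() */

static const void *
example_map_index_data(struct pipe_context *ctx,
                       const struct pipe_draw_info *info,
                       struct pipe_transfer **transfer)
{
   if (!info->index_size)
      return NULL;               /* index_size == 0 means a non-indexed draw */

   if (info->has_user_indices)
      return info->index.user;   /* CPU pointer owned by the state tracker */

   /* Resource path: map the whole buffer for reading.  The first index used
    * by the draw sits at byte offset info->start * info->index_size. */
   return pipe_buffer_map(ctx, info->index.resource,
                          PIPE_TRANSFER_READ, transfer);
}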
diff --git a/src/gallium/drivers/r300/r300_state.c b/src/gallium/drivers/r300/r300_state.c
index b3bfafd..c2b9937 100644
--- a/src/gallium/drivers/r300/r300_state.c
+++ b/src/gallium/drivers/r300/r300_state.c
@@ -1776,51 +1776,20 @@ static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
if (buffers[i].is_user_buffer) {
draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
buffers[i].buffer.user, ~0);
} else if (buffers[i].buffer.resource) {
draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
r300_resource(buffers[i].buffer.resource)->malloced_buffer, ~0);
}
}
}
-static void r300_set_index_buffer_hwtcl(struct pipe_context* pipe,
- const struct pipe_index_buffer *ib)
-{
- struct r300_context* r300 = r300_context(pipe);
-
- if (ib) {
- pipe_resource_reference(&r300->index_buffer.buffer, ib->buffer);
- memcpy(&r300->index_buffer, ib, sizeof(*ib));
- } else {
- pipe_resource_reference(&r300->index_buffer.buffer, NULL);
- }
-}
-
-static void r300_set_index_buffer_swtcl(struct pipe_context* pipe,
- const struct pipe_index_buffer *ib)
-{
- struct r300_context* r300 = r300_context(pipe);
-
- if (ib) {
- const void *buf = NULL;
- if (ib->user_buffer) {
- buf = ib->user_buffer;
- } else if (ib->buffer) {
- buf = r300_resource(ib->buffer)->malloced_buffer;
- }
- draw_set_indexes(r300->draw,
- (const ubyte *) buf + ib->offset,
- ib->index_size, ~0);
- }
-}
-
/* Initialize the PSC tables. */
static void r300_vertex_psc(struct r300_vertex_element_state *velems)
{
struct r300_vertex_stream_state *vstream = &velems->vertex_stream;
uint16_t type, swizzle;
enum pipe_format format;
unsigned i;
/* Vertex shaders have no semantics on their inputs,
* so PSC should just route stuff based on the vertex elements,
@@ -2118,24 +2087,22 @@ void r300_init_state_functions(struct r300_context* r300)
r300->context.set_sampler_views = r300_set_sampler_views;
r300->context.create_sampler_view = r300_create_sampler_view;
r300->context.sampler_view_destroy = r300_sampler_view_destroy;
r300->context.set_scissor_states = r300_set_scissor_states;
r300->context.set_viewport_states = r300_set_viewport_states;
if (r300->screen->caps.has_tcl) {
r300->context.set_vertex_buffers = r300_set_vertex_buffers_hwtcl;
- r300->context.set_index_buffer = r300_set_index_buffer_hwtcl;
} else {
r300->context.set_vertex_buffers = r300_set_vertex_buffers_swtcl;
- r300->context.set_index_buffer = r300_set_index_buffer_swtcl;
}
r300->context.create_vertex_elements_state = r300_create_vertex_elements_state;
r300->context.bind_vertex_elements_state = r300_bind_vertex_elements_state;
r300->context.delete_vertex_elements_state = r300_delete_vertex_elements_state;
r300->context.create_vs_state = r300_create_vs_state;
r300->context.bind_vs_state = r300_bind_vs_state;
r300->context.delete_vs_state = r300_delete_vs_state;
diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h
index e1715e8..e5acd41 100644
--- a/src/gallium/drivers/r600/r600_pipe.h
+++ b/src/gallium/drivers/r600/r600_pipe.h
@@ -502,23 +502,20 @@ struct r600_context {
bool force_blend_disable;
boolean dual_src_blend;
unsigned zwritemask;
int ps_iter_samples;
/* The list of all texture buffer objects in this context.
* This list is walked when a buffer is invalidated/reallocated and
* the GPU addresses are updated. */
struct list_head texture_buffers;
- /* Index buffer. */
- struct pipe_index_buffer index_buffer;
-
/* Last draw state (-1 = unset). */
enum pipe_prim_type last_primitive_type; /* Last primitive type used in draw_vbo. */
enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
enum pipe_prim_type last_rast_prim;
unsigned last_start_instance;
void *sb_context;
struct r600_isa *isa;
float sample_positions[4 * 16];
float tess_state[8];
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index ee6fd26..99ec5e7 100644
--- a/src/gallium/drivers/r600/r600_state_common.c
+++ b/src/gallium/drivers/r600/r600_state_common.c
@@ -516,34 +516,20 @@ static void r600_bind_vertex_elements(struct pipe_context *ctx, void *state)
r600_set_cso_state(rctx, &rctx->vertex_fetch_shader, state);
}
static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
r600_resource_reference(&shader->buffer, NULL);
FREE(shader);
}
-static void r600_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct r600_context *rctx = (struct r600_context *)ctx;
-
- if (ib) {
- pipe_resource_reference(&rctx->index_buffer.buffer, ib->buffer);
- memcpy(&rctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, ib->buffer);
- } else {
- pipe_resource_reference(&rctx->index_buffer.buffer, NULL);
- }
-}
-
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
if (rctx->vertex_buffer_state.dirty_mask) {
rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) *
util_bitcount(rctx->vertex_buffer_state.dirty_mask);
r600_mark_atom_dirty(rctx, &rctx->vertex_buffer_state.atom);
}
}
static void r600_set_vertex_buffers(struct pipe_context *ctx,
@@ -1695,28 +1681,30 @@ static inline void r600_emit_rasterizer_prim_state(struct r600_context *rctx)
*/
radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2) |
(rctx->rasterizer ? rctx->rasterizer->pa_sc_line_stipple : 0));
rctx->last_rast_prim = rast_prim;
}
static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- struct pipe_index_buffer ib = {};
+ struct pipe_resource *indexbuf = info->has_user_indices ? NULL : info->index.resource;
struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
+ bool has_user_indices = info->has_user_indices;
uint64_t mask;
- unsigned num_patches, dirty_tex_counter;
+ unsigned num_patches, dirty_tex_counter, index_offset = 0;
+ unsigned index_size = info->index_size;
int index_bias;
- if (!info->indirect && !info->count && (info->indexed || !info->count_from_stream_output)) {
+ if (!info->indirect && !info->count && (index_size || !info->count_from_stream_output)) {
return;
}
if (unlikely(!rctx->vs_shader)) {
assert(0);
return;
}
if (unlikely(!rctx->ps_shader &&
(!rctx->rasterizer || !rctx->rasterizer->rasterizer_discard))) {
assert(0);
@@ -1740,86 +1728,77 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
/* useless to render because current rendering command
* can't be achieved
*/
return;
}
rctx->current_rast_prim = (rctx->gs_shader)? rctx->gs_shader->gs_output_prim
: (rctx->tes_shader)? rctx->tes_shader->info.properties[TGSI_PROPERTY_TES_PRIM_MODE]
: info->mode;
- if (info->indexed) {
- /* Initialize the index buffer struct. */
- pipe_resource_reference(&ib.buffer, rctx->index_buffer.buffer);
- ib.user_buffer = rctx->index_buffer.user_buffer;
- ib.index_size = rctx->index_buffer.index_size;
- ib.offset = rctx->index_buffer.offset;
- if (!info->indirect) {
- ib.offset += info->start * ib.index_size;
- }
+ if (index_size) {
+ index_offset += info->start * index_size;
/* Translate 8-bit indices to 16-bit. */
- if (unlikely(ib.index_size == 1)) {
+ if (unlikely(index_size == 1)) {
struct pipe_resource *out_buffer = NULL;
unsigned out_offset;
void *ptr;
unsigned start, count;
if (likely(!info->indirect)) {
start = 0;
count = info->count;
}
else {
/* Have to get start/count from indirect buffer, slow path ahead... */
struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect->buffer;
unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource,
PIPE_TRANSFER_READ);
if (data) {
data += info->indirect->offset / sizeof(unsigned);
- start = data[2] * ib.index_size;
+ start = data[2] * index_size;
count = data[0];
}
else {
start = 0;
count = 0;
}
}
u_upload_alloc(ctx->stream_uploader, start, count * 2,
256, &out_offset, &out_buffer, &ptr);
- if (unlikely(!ptr)) {
- pipe_resource_reference(&ib.buffer, NULL);
+ if (unlikely(!ptr))
return;
- }
util_shorten_ubyte_elts_to_userptr(
- &rctx->b.b, &ib, 0, 0, ib.offset + start, count, ptr);
+ &rctx->b.b, info, 0, 0, index_offset, count, ptr);
- pipe_resource_reference(&ib.buffer, NULL);
- ib.user_buffer = NULL;
- ib.buffer = out_buffer;
- ib.offset = out_offset;
- ib.index_size = 2;
+ indexbuf = out_buffer;
+ index_offset = out_offset;
+ index_size = 2;
+ has_user_indices = false;
}
/* Upload the index buffer.
* The upload is skipped for small index counts on little-endian machines
* and the indices are emitted via PKT3_DRAW_INDEX_IMMD.
* Indirect draws never use immediate indices.
* Note: Instanced rendering in combination with immediate indices hangs. */
- if (ib.user_buffer && (R600_BIG_ENDIAN || info->indirect ||
+ if (has_user_indices && (R600_BIG_ENDIAN || info->indirect ||
info->instance_count > 1 ||
- info->count*ib.index_size > 20)) {
+ info->count*index_size > 20)) {
+ indexbuf = NULL;
u_upload_data(ctx->stream_uploader, 0,
- info->count * ib.index_size, 256,
- ib.user_buffer, &ib.offset, &ib.buffer);
- ib.user_buffer = NULL;
+ info->count * index_size, 256,
+ info->index.user, &index_offset, &indexbuf);
+ has_user_indices = false;
}
index_bias = info->index_bias;
} else {
index_bias = info->start;
}
/* Set the index offset and primitive restart. */
if (rctx->vgt_state.vgt_multi_prim_ib_reset_en != info->primitive_restart ||
rctx->vgt_state.vgt_multi_prim_ib_reset_indx != info->restart_index ||
rctx->vgt_state.vgt_indx_offset != index_bias ||
@@ -1833,21 +1812,21 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
/* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */
if (rctx->b.chip_class == R600) {
rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH;
r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
}
if (rctx->b.chip_class >= EVERGREEN)
evergreen_setup_tess_constants(rctx, info, &num_patches);
/* Emit states. */
- r600_need_cs_space(rctx, ib.user_buffer ? 5 : 0, TRUE);
+ r600_need_cs_space(rctx, has_user_indices ? 5 : 0, TRUE);
r600_flush_emit(rctx);
mask = rctx->dirty_atoms;
while (mask != 0) {
r600_emit_atom(rctx, rctx->atoms[u_bit_scan64(&mask)]);
}
if (rctx->b.chip_class == CAYMAN) {
/* Copied from radeonsi. */
unsigned primgroup_size = 128; /* recommended without a GS */
@@ -1930,58 +1909,58 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32UL) & 0xFF);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)info->indirect->buffer,
RADEON_USAGE_READ,
RADEON_PRIO_DRAW_INDIRECT));
}
- if (info->indexed) {
+ if (index_size) {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
- radeon_emit(cs, ib.index_size == 4 ?
+ radeon_emit(cs, index_size == 4 ?
(VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) :
(VGT_INDEX_16 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_16_BIT : 0)));
- if (ib.user_buffer) {
- unsigned size_bytes = info->count*ib.index_size;
+ if (has_user_indices) {
+ unsigned size_bytes = info->count*index_size;
unsigned size_dw = align(size_bytes, 4) / 4;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, render_cond_bit));
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_IMMEDIATE);
- radeon_emit_array(cs, ib.user_buffer, size_dw);
+ radeon_emit_array(cs, info->index.user, size_dw);
} else {
- uint64_t va = r600_resource(ib.buffer)->gpu_address + ib.offset;
+ uint64_t va = r600_resource(indexbuf)->gpu_address + index_offset;
if (likely(!info->indirect)) {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX, 3, render_cond_bit));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32UL) & 0xFF);
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
+ (struct r600_resource*)indexbuf,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER));
}
else {
- uint32_t max_size = (ib.buffer->width0 - ib.offset) / ib.index_size;
+ uint32_t max_size = (indexbuf->width0 - index_offset) / index_size;
radeon_emit(cs, PKT3(EG_PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32UL) & 0xFF);
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
- (struct r600_resource*)ib.buffer,
+ (struct r600_resource*)indexbuf,
RADEON_USAGE_READ,
RADEON_PRIO_INDEX_BUFFER));
radeon_emit(cs, PKT3(EG_PKT3_INDEX_BUFFER_SIZE, 0, 0));
radeon_emit(cs, max_size);
radeon_emit(cs, PKT3(EG_PKT3_DRAW_INDEX_INDIRECT, 1, render_cond_bit));
radeon_emit(cs, info->indirect->offset);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
}
@@ -2057,21 +2036,22 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
surf = rctx->framebuffer.state.cbufs[i];
rtex = (struct r600_texture*)surf->texture;
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
} while (mask);
}
rctx->framebuffer.do_update_surf_dirtiness = false;
}
- pipe_resource_reference(&ib.buffer, NULL);
+ if (index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
rctx->b.num_draw_calls++;
}
uint32_t r600_translate_stencil_op(int s_op)
{
switch (s_op) {
case PIPE_STENCIL_OP_KEEP:
return V_028800_STENCIL_KEEP;
case PIPE_STENCIL_OP_ZERO:
return V_028800_STENCIL_ZERO;
@@ -2964,21 +2944,20 @@ void r600_init_common_state_functions(struct r600_context *rctx)
rctx->b.b.delete_vs_state = r600_delete_vs_state;
rctx->b.b.delete_gs_state = r600_delete_gs_state;
rctx->b.b.delete_tcs_state = r600_delete_tcs_state;
rctx->b.b.delete_tes_state = r600_delete_tes_state;
rctx->b.b.set_blend_color = r600_set_blend_color;
rctx->b.b.set_clip_state = r600_set_clip_state;
rctx->b.b.set_constant_buffer = r600_set_constant_buffer;
rctx->b.b.set_sample_mask = r600_set_sample_mask;
rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref;
rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers;
- rctx->b.b.set_index_buffer = r600_set_index_buffer;
rctx->b.b.set_sampler_views = r600_set_sampler_views;
rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy;
rctx->b.b.texture_barrier = r600_texture_barrier;
rctx->b.b.set_stream_output_targets = r600_set_streamout_targets;
rctx->b.b.set_active_query_state = r600_set_active_query_state;
rctx->b.b.draw_vbo = r600_draw_vbo;
rctx->b.invalidate_buffer = r600_invalidate_buffer;
rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state;
rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space;
}
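Both r600_draw_vbo above and si_draw_vbo further down follow the same
ownership rule: info->index.resource belongs to the caller, while any buffer
the driver creates itself through u_upload_alloc()/u_upload_data() is a
temporary reference that must be dropped after the draw. A rough sketch of
the user-index upload half of that pattern, with made-up function and
variable names:

#include "util/u_inlines.h"      /* pipe_resource_reference() */
#include "util/u_upload_mgr.h"   /* u_upload_data() */

static void
example_draw_uploaded_user_indices(struct pipe_context *ctx,
                                   const struct pipe_draw_info *info)
{
   struct pipe_resource *indexbuf = NULL;
   unsigned index_offset = 0;

   /* Copy the user-space indices into a GPU-visible buffer; u_upload_data
    * returns a buffer reference that this function now owns. */
   u_upload_data(ctx->stream_uploader, 0,
                 info->count * info->index_size, 256,
                 info->index.user, &index_offset, &indexbuf);
   if (!indexbuf)
      return;   /* upload failed, skip the draw */

   /* ... emit the hardware draw using indexbuf + index_offset ... */

   /* Release only the temporary buffer; a caller-owned index.resource is
    * never unreferenced here. */
   pipe_resource_reference(&indexbuf, NULL);
}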
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index ea61e1e..71a3e98 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -307,21 +307,20 @@ struct si_context {
struct pipe_resource *tf_ring;
struct pipe_resource *tess_offchip_ring;
union pipe_color_union *border_color_table; /* in CPU memory, any endian */
struct r600_resource *border_color_buffer;
union pipe_color_union *border_color_map; /* in VRAM (slow access), little endian */
unsigned border_color_count;
/* Vertex and index buffers. */
bool vertex_buffers_dirty;
bool vertex_buffer_pointer_dirty;
- struct pipe_index_buffer index_buffer;
struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
/* MSAA config state. */
int ps_iter_samples;
bool smoothing_enabled;
/* DB render state. */
bool dbcb_depth_copy_enabled;
bool dbcb_stencil_copy_enabled;
unsigned dbcb_copy_sample;
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index 79c7f25..b67667f 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -3870,38 +3870,20 @@ static void si_set_vertex_buffers(struct pipe_context *ctx,
}
}
} else {
for (i = 0; i < count; i++) {
pipe_resource_reference(&dst[i].buffer.resource, NULL);
}
}
sctx->vertex_buffers_dirty = true;
}
-static void si_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct si_context *sctx = (struct si_context *)ctx;
-
- if (ib) {
- struct pipe_resource *buf = ib->buffer;
-
- pipe_resource_reference(&sctx->index_buffer.buffer, buf);
- memcpy(&sctx->index_buffer, ib, sizeof(*ib));
- r600_context_add_resource_size(ctx, buf);
- if (buf)
- r600_resource(buf)->bind_history |= PIPE_BIND_INDEX_BUFFER;
- } else {
- pipe_resource_reference(&sctx->index_buffer.buffer, NULL);
- }
-}
-
/*
* Misc
*/
static void si_set_tess_state(struct pipe_context *ctx,
const float default_outer_level[4],
const float default_inner_level[2])
{
struct si_context *sctx = (struct si_context *)ctx;
struct pipe_constant_buffer cb;
@@ -4042,21 +4024,20 @@ void si_init_state_functions(struct si_context *sctx)
sctx->b.b.create_sampler_view = si_create_sampler_view;
sctx->b.b.sampler_view_destroy = si_sampler_view_destroy;
sctx->b.b.set_sample_mask = si_set_sample_mask;
sctx->b.b.create_vertex_elements_state = si_create_vertex_elements;
sctx->b.b.bind_vertex_elements_state = si_bind_vertex_elements;
sctx->b.b.delete_vertex_elements_state = si_delete_vertex_element;
sctx->b.b.set_vertex_buffers = si_set_vertex_buffers;
- sctx->b.b.set_index_buffer = si_set_index_buffer;
sctx->b.b.texture_barrier = si_texture_barrier;
sctx->b.b.memory_barrier = si_memory_barrier;
sctx->b.b.set_min_samples = si_set_min_samples;
sctx->b.b.set_tess_state = si_set_tess_state;
sctx->b.b.set_active_query_state = si_set_active_query_state;
sctx->b.set_occlusion_query_state = si_set_occlusion_query_state;
sctx->b.save_qbo_state = si_save_qbo_state;
sctx->b.need_gfx_cs_space = si_need_gfx_cs_space;
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.c b/src/gallium/drivers/radeonsi/si_state_draw.c
index 70b6ed8..ecc06d6 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.c
+++ b/src/gallium/drivers/radeonsi/si_state_draw.c
@@ -528,21 +528,21 @@ static void si_emit_rasterizer_prim_state(struct si_context *sctx)
S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
sctx->last_rast_prim = rast_prim;
sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
static void si_emit_vs_state(struct si_context *sctx,
const struct pipe_draw_info *info)
{
sctx->current_vs_state &= C_VS_STATE_INDEXED;
- sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->indexed);
+ sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
if (sctx->current_vs_state != sctx->last_vs_state) {
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
radeon_set_sh_reg(cs,
sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX] +
SI_SGPR_VS_STATE_BITS * 4,
sctx->current_vs_state);
sctx->last_vs_state = sctx->current_vs_state;
@@ -601,21 +601,23 @@ static void si_emit_draw_registers(struct si_context *sctx,
(info->restart_index != sctx->last_restart_index ||
sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
info->restart_index);
sctx->last_restart_index = info->restart_index;
}
}
static void si_emit_draw_packets(struct si_context *sctx,
const struct pipe_draw_info *info,
- const struct pipe_index_buffer *ib)
+ struct pipe_resource *indexbuf,
+ unsigned index_size,
+ unsigned index_offset)
{
struct pipe_draw_indirect_info *indirect = info->indirect;
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
uint32_t index_max_size = 0;
uint64_t index_va = 0;
if (info->count_from_stream_output) {
struct r600_so_target *t =
@@ -634,26 +636,26 @@ static void si_emit_draw_packets(struct si_context *sctx,
radeon_emit(cs, va >> 32); /* src address hi */
radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
radeon_emit(cs, 0); /* unused */
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
t->buf_filled_size, RADEON_USAGE_READ,
RADEON_PRIO_SO_FILLED_SIZE);
}
/* draw packet */
- if (info->indexed) {
- if (ib->index_size != sctx->last_index_size) {
+ if (index_size) {
+ if (index_size != sctx->last_index_size) {
unsigned index_type;
/* index type */
- switch (ib->index_size) {
+ switch (index_size) {
case 1:
index_type = V_028A7C_VGT_INDEX_8;
break;
case 2:
index_type = V_028A7C_VGT_INDEX_16 |
(SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
break;
case 4:
index_type = V_028A7C_VGT_INDEX_32 |
@@ -666,29 +668,29 @@ static void si_emit_draw_packets(struct si_context *sctx,
}
if (sctx->b.chip_class >= GFX9) {
radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
2, index_type);
} else {
radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
radeon_emit(cs, index_type);
}
- sctx->last_index_size = ib->index_size;
+ sctx->last_index_size = index_size;
}
- index_max_size = (ib->buffer->width0 - ib->offset) /
- ib->index_size;
- index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;
+ index_max_size = (indexbuf->width0 - index_offset) /
+ index_size;
+ index_va = r600_resource(indexbuf)->gpu_address + index_offset;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource *)ib->buffer,
+ (struct r600_resource *)indexbuf,
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
* so the state must be re-emitted before the next indexed draw.
*/
if (sctx->b.chip_class >= CIK)
sctx->last_index_size = -1;
}
if (indirect) {
@@ -700,57 +702,57 @@ static void si_emit_draw_packets(struct si_context *sctx,
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
radeon_emit(cs, 1);
radeon_emit(cs, indirect_va);
radeon_emit(cs, indirect_va >> 32);
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource *)indirect->buffer,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
- unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA
+ unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
: V_0287F0_DI_SRC_SEL_AUTO_INDEX;
assert(indirect->offset % 4 == 0);
- if (info->indexed) {
+ if (index_size) {
radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
radeon_emit(cs, index_va);
radeon_emit(cs, index_va >> 32);
radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
radeon_emit(cs, index_max_size);
}
if (!sctx->screen->has_draw_indirect_multi) {
- radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT
+ radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
: PKT3_DRAW_INDIRECT,
3, render_cond_bit));
radeon_emit(cs, indirect->offset);
radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, di_src_sel);
} else {
uint64_t count_va = 0;
if (indirect->indirect_draw_count) {
struct r600_resource *params_buf =
(struct r600_resource *)indirect->indirect_draw_count;
radeon_add_to_buffer_list(
&sctx->b, &sctx->b.gfx, params_buf,
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
}
- radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
+ radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
PKT3_DRAW_INDIRECT_MULTI,
8, render_cond_bit));
radeon_emit(cs, indirect->offset);
radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
S_2C3_DRAW_INDEX_ENABLE(1) |
S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
radeon_emit(cs, indirect->draw_count);
radeon_emit(cs, count_va);
@@ -758,40 +760,40 @@ static void si_emit_draw_packets(struct si_context *sctx,
radeon_emit(cs, indirect->stride);
radeon_emit(cs, di_src_sel);
}
} else {
int base_vertex;
radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
radeon_emit(cs, info->instance_count);
/* Base vertex and start instance. */
- base_vertex = info->indexed ? info->index_bias : info->start;
+ base_vertex = index_size ? info->index_bias : info->start;
if (base_vertex != sctx->last_base_vertex ||
sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
info->start_instance != sctx->last_start_instance ||
info->drawid != sctx->last_drawid ||
sh_base_reg != sctx->last_sh_base_reg) {
radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
radeon_emit(cs, base_vertex);
radeon_emit(cs, info->start_instance);
radeon_emit(cs, info->drawid);
sctx->last_base_vertex = base_vertex;
sctx->last_start_instance = info->start_instance;
sctx->last_drawid = info->drawid;
sctx->last_sh_base_reg = sh_base_reg;
}
- if (info->indexed) {
- index_va += info->start * ib->index_size;
+ if (index_size) {
+ index_va += info->start * index_size;
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
radeon_emit(cs, index_max_size);
radeon_emit(cs, index_va);
radeon_emit(cs, index_va >> 32);
radeon_emit(cs, info->count);
radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
} else {
radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
radeon_emit(cs, info->count);
@@ -1136,37 +1138,38 @@ void si_ce_post_draw_synchronization(struct si_context *sctx)
radeon_emit(sctx->b.gfx.cs, 0);
sctx->ce_need_synchronization = false;
}
}
void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
- const struct pipe_index_buffer *ib = &sctx->index_buffer;
- struct pipe_index_buffer ib_tmp; /* for index buffer uploads only */
+ struct pipe_resource *indexbuf = info->index.resource;
unsigned mask, dirty_tex_counter;
enum pipe_prim_type rast_prim;
unsigned num_patches = 0;
+ unsigned index_size = info->index_size;
+ unsigned index_offset = info->indirect ? info->start * index_size : 0;
if (likely(!info->indirect)) {
/* SI-CI treat instance_count==0 as instance_count==1. There is
* no workaround for indirect draws, but we can at least skip
* direct draws.
*/
if (unlikely(!info->instance_count))
return;
/* Handle count == 0. */
if (unlikely(!info->count &&
- (info->indexed || !info->count_from_stream_output)))
+ (index_size || !info->count_from_stream_output)))
return;
}
if (unlikely(!sctx->vs_shader.cso)) {
assert(0);
return;
}
if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
assert(0);
return;
@@ -1224,72 +1227,69 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
sctx->do_update_shaders = true;
}
}
if (sctx->do_update_shaders && !si_update_shaders(sctx))
return;
if (!si_upload_graphics_shader_descriptors(sctx))
return;
- ib_tmp.buffer = NULL;
-
- if (info->indexed) {
+ if (index_size) {
/* Translate or upload, if needed. */
/* 8-bit indices are supported on VI. */
- if (sctx->b.chip_class <= CIK && ib->index_size == 1) {
- unsigned start, count, start_offset, size;
+ if (sctx->b.chip_class <= CIK && index_size == 1) {
+ unsigned start, count, start_offset, size, offset;
void *ptr;
si_get_draw_start_count(sctx, info, &start, &count);
start_offset = start * 2;
size = count * 2;
+ indexbuf = NULL;
u_upload_alloc(ctx->stream_uploader, start_offset,
size,
si_optimal_tcc_alignment(sctx, size),
- &ib_tmp.offset, &ib_tmp.buffer, &ptr);
- if (!ib_tmp.buffer)
+ &offset, &indexbuf, &ptr);
+ if (!indexbuf)
return;
- util_shorten_ubyte_elts_to_userptr(&sctx->b.b, ib, 0, 0,
- ib->offset + start,
+ util_shorten_ubyte_elts_to_userptr(&sctx->b.b, info, 0, 0,
+ index_offset + start,
count, ptr);
/* info->start will be added by the drawing code */
- ib_tmp.offset -= start_offset;
- ib_tmp.index_size = 2;
- ib = &ib_tmp;
- } else if (ib->user_buffer && !ib->buffer) {
+ index_offset = offset - start_offset;
+ index_size = 2;
+ } else if (info->has_user_indices) {
unsigned start_offset;
assert(!info->indirect);
- start_offset = info->start * ib->index_size;
+ start_offset = info->start * index_size;
+ indexbuf = NULL;
u_upload_data(ctx->stream_uploader, start_offset,
- info->count * ib->index_size,
+ info->count * index_size,
sctx->screen->b.info.tcc_cache_line_size,
- (char*)ib->user_buffer + start_offset,
- &ib_tmp.offset, &ib_tmp.buffer);
- if (!ib_tmp.buffer)
+ (char*)info->index.user + start_offset,
+ &index_offset, &indexbuf);
+ if (!indexbuf)
return;
/* info->start will be added by the drawing code */
- ib_tmp.offset -= start_offset;
- ib_tmp.index_size = ib->index_size;
- ib = &ib_tmp;
+ index_offset -= start_offset;
} else if (sctx->b.chip_class <= CIK &&
- r600_resource(ib->buffer)->TC_L2_dirty) {
+ r600_resource(indexbuf)->TC_L2_dirty) {
/* VI reads index buffers through TC L2, so it doesn't
* need this. */
sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
- r600_resource(ib->buffer)->TC_L2_dirty = false;
+ r600_resource(indexbuf)->TC_L2_dirty = false;
}
}
if (info->indirect) {
struct pipe_draw_indirect_info *indirect = info->indirect;
/* Add the buffer size for memory checking in need_cs_space. */
r600_context_add_resource_size(ctx, indirect->buffer);
if (r600_resource(indirect->buffer)->TC_L2_dirty) {
@@ -1346,21 +1346,21 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
}
sctx->dirty_states = 0;
si_emit_rasterizer_prim_state(sctx);
if (sctx->tes_shader.cso)
si_emit_derived_tess_state(sctx, info, &num_patches);
si_emit_vs_state(sctx, info);
si_emit_draw_registers(sctx, info, num_patches);
si_ce_pre_draw_synchronization(sctx);
- si_emit_draw_packets(sctx, info, ib);
+ si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
si_ce_post_draw_synchronization(sctx);
if (sctx->trace_buf)
si_trace_emit(sctx);
/* Workaround for a VGT hang when streamout is enabled.
* It must be done after drawing. */
if ((sctx->b.family == CHIP_HAWAII ||
sctx->b.family == CHIP_TONGA ||
sctx->b.family == CHIP_FIJI) &&
@@ -1392,26 +1392,27 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
if (rtex->fmask.size)
rtex->dirty_level_mask |= 1 << surf->u.tex.level;
if (rtex->dcc_gather_statistics)
rtex->separate_dcc_dirty = true;
} while (mask);
}
sctx->framebuffer.do_update_surf_dirtiness = false;
}
- pipe_resource_reference(&ib_tmp.buffer, NULL);
sctx->b.num_draw_calls++;
if (info->primitive_restart)
sctx->b.num_prim_restart_calls++;
if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
sctx->b.num_spill_draw_calls++;
+ if (index_size && indexbuf != info->index.resource)
+ pipe_resource_reference(&indexbuf, NULL);
}
void si_trace_emit(struct si_context *sctx)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
sctx->trace_id++;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
diff --git a/src/gallium/drivers/rbug/rbug_context.c b/src/gallium/drivers/rbug/rbug_context.c
index 91b1ac6..e1f3c4f 100644
--- a/src/gallium/drivers/rbug/rbug_context.c
+++ b/src/gallium/drivers/rbug/rbug_context.c
@@ -787,39 +787,20 @@ rbug_set_vertex_buffers(struct pipe_context *_pipe,
}
pipe->set_vertex_buffers(pipe, start_slot,
num_buffers,
buffers);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
-rbug_set_index_buffer(struct pipe_context *_pipe,
- const struct pipe_index_buffer *_ib)
-{
- struct rbug_context *rb_pipe = rbug_context(_pipe);
- struct pipe_context *pipe = rb_pipe->pipe;
- struct pipe_index_buffer unwrapped_ib, *ib = NULL;
-
- if (_ib) {
- unwrapped_ib = *_ib;
- unwrapped_ib.buffer = rbug_resource_unwrap(_ib->buffer);
- ib = &unwrapped_ib;
- }
-
- mtx_lock(&rb_pipe->call_mutex);
- pipe->set_index_buffer(pipe, ib);
- mtx_unlock(&rb_pipe->call_mutex);
-}
-
-static void
rbug_set_sample_mask(struct pipe_context *_pipe,
unsigned sample_mask)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
mtx_lock(&rb_pipe->call_mutex);
pipe->set_sample_mask(pipe, sample_mask);
mtx_unlock(&rb_pipe->call_mutex);
}
@@ -1253,21 +1234,20 @@ rbug_context_create(struct pipe_screen *_screen, struct pipe_context *pipe)
rb_pipe->base.set_blend_color = rbug_set_blend_color;
rb_pipe->base.set_stencil_ref = rbug_set_stencil_ref;
rb_pipe->base.set_clip_state = rbug_set_clip_state;
rb_pipe->base.set_constant_buffer = rbug_set_constant_buffer;
rb_pipe->base.set_framebuffer_state = rbug_set_framebuffer_state;
rb_pipe->base.set_polygon_stipple = rbug_set_polygon_stipple;
rb_pipe->base.set_scissor_states = rbug_set_scissor_states;
rb_pipe->base.set_viewport_states = rbug_set_viewport_states;
rb_pipe->base.set_sampler_views = rbug_set_sampler_views;
rb_pipe->base.set_vertex_buffers = rbug_set_vertex_buffers;
- rb_pipe->base.set_index_buffer = rbug_set_index_buffer;
rb_pipe->base.set_sample_mask = rbug_set_sample_mask;
rb_pipe->base.create_stream_output_target = rbug_create_stream_output_target;
rb_pipe->base.stream_output_target_destroy = rbug_stream_output_target_destroy;
rb_pipe->base.set_stream_output_targets = rbug_set_stream_output_targets;
rb_pipe->base.resource_copy_region = rbug_resource_copy_region;
rb_pipe->base.blit = rbug_blit;
rb_pipe->base.flush_resource = rbug_flush_resource;
rb_pipe->base.clear = rbug_clear;
rb_pipe->base.clear_render_target = rbug_clear_render_target;
rb_pipe->base.clear_depth_stencil = rbug_clear_depth_stencil;
diff --git a/src/gallium/drivers/softpipe/sp_context.h b/src/gallium/drivers/softpipe/sp_context.h
index a57f587..7ce4dc3 100644
--- a/src/gallium/drivers/softpipe/sp_context.h
+++ b/src/gallium/drivers/softpipe/sp_context.h
@@ -81,21 +81,20 @@ struct softpipe_context {
struct pipe_resource *constants[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple poly_stipple;
struct pipe_scissor_state scissors[PIPE_MAX_VIEWPORTS];
struct pipe_sampler_view *sampler_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct pipe_image_view images[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
struct pipe_shader_buffer buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
struct pipe_viewport_state viewports[PIPE_MAX_VIEWPORTS];
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer index_buffer;
struct pipe_resource *mapped_vs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct pipe_resource *mapped_gs_tex[PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct draw_so_target *so_targets[PIPE_MAX_SO_BUFFERS];
unsigned num_so_targets;
struct pipe_query_data_so_statistics so_stats;
struct pipe_query_data_pipeline_statistics pipeline_statistics;
unsigned active_statistics_queries;
diff --git a/src/gallium/drivers/softpipe/sp_draw_arrays.c b/src/gallium/drivers/softpipe/sp_draw_arrays.c
index 137ad05..6363701 100644
--- a/src/gallium/drivers/softpipe/sp_draw_arrays.c
+++ b/src/gallium/drivers/softpipe/sp_draw_arrays.c
@@ -89,35 +89,31 @@ softpipe_draw_vbo(struct pipe_context *pipe,
if (!sp->vertex_buffer[i].buffer.resource) {
continue;
}
buf = softpipe_resource_data(sp->vertex_buffer[i].buffer.resource);
size = sp->vertex_buffer[i].buffer.resource->width0;
}
draw_set_mapped_vertex_buffer(draw, i, buf, size);
}
/* Map index buffer, if present */
- if (info->indexed) {
+ if (info->index_size) {
unsigned available_space = ~0;
- mapped_indices = sp->index_buffer.user_buffer;
+ mapped_indices = info->has_user_indices ? info->index.user : NULL;
if (!mapped_indices) {
- mapped_indices = softpipe_resource_data(sp->index_buffer.buffer);
- if (sp->index_buffer.buffer->width0 > sp->index_buffer.offset)
- available_space =
- (sp->index_buffer.buffer->width0 - sp->index_buffer.offset);
- else
- available_space = 0;
+ mapped_indices = softpipe_resource_data(info->index.resource);
+ available_space = info->index.resource->width0;
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices + sp->index_buffer.offset,
- sp->index_buffer.index_size, available_space);
+ (ubyte *) mapped_indices,
+ info->index_size, available_space);
}
for (i = 0; i < sp->num_so_targets; i++) {
void *buf = 0;
if (sp->so_targets[i]) {
buf = softpipe_resource(sp->so_targets[i]->target.buffer)->data;
sp->so_targets[i]->mapping = buf;
}
}
diff --git a/src/gallium/drivers/softpipe/sp_state_vertex.c b/src/gallium/drivers/softpipe/sp_state_vertex.c
index 48c8d2c..a7a8736 100644
--- a/src/gallium/drivers/softpipe/sp_state_vertex.c
+++ b/src/gallium/drivers/softpipe/sp_state_vertex.c
@@ -90,33 +90,19 @@ softpipe_set_vertex_buffers(struct pipe_context *pipe,
util_set_vertex_buffers_count(softpipe->vertex_buffer,
&softpipe->num_vertex_buffers,
buffers, start_slot, count);
softpipe->dirty |= SP_NEW_VERTEX;
draw_set_vertex_buffers(softpipe->draw, start_slot, count, buffers);
}
-static void
-softpipe_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct softpipe_context *softpipe = softpipe_context(pipe);
-
- if (ib)
- memcpy(&softpipe->index_buffer, ib, sizeof(softpipe->index_buffer));
- else
- memset(&softpipe->index_buffer, 0, sizeof(softpipe->index_buffer));
-}
-
-
void
softpipe_init_vertex_funcs(struct pipe_context *pipe)
{
pipe->create_vertex_elements_state = softpipe_create_vertex_elements_state;
pipe->bind_vertex_elements_state = softpipe_bind_vertex_elements_state;
pipe->delete_vertex_elements_state = softpipe_delete_vertex_elements_state;
pipe->set_vertex_buffers = softpipe_set_vertex_buffers;
- pipe->set_index_buffer = softpipe_set_index_buffer;
}
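For the software paths (softpipe above, and the r300/svga swtnl code in this
patch) the index data is now handed straight from pipe_draw_info to the draw
module instead of being cached on the context. A minimal sketch of that,
assuming softpipe's CPU-side resource accessor; the function name is
illustrative only:

#include "draw/draw_context.h"   /* draw_set_indexes(), draw_vbo() */

static void
example_feed_draw_module(struct draw_context *draw,
                         const struct pipe_draw_info *info)
{
   if (info->index_size) {
      const void *indices;
      unsigned available_space = ~0;

      if (info->has_user_indices) {
         indices = info->index.user;
      } else {
         /* softpipe keeps buffer contents in CPU memory; hardware drivers
          * would map the resource instead. */
         indices = softpipe_resource_data(info->index.resource);
         available_space = info->index.resource->width0;
      }
      draw_set_indexes(draw, indices, info->index_size, available_space);
   }
   draw_vbo(draw, info);
}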
diff --git a/src/gallium/drivers/svga/svga_context.h b/src/gallium/drivers/svga/svga_context.h
index a214f2c..a268635 100644
--- a/src/gallium/drivers/svga/svga_context.h
+++ b/src/gallium/drivers/svga/svga_context.h
@@ -261,21 +261,20 @@ struct svga_state
const struct svga_sampler_state *sampler[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
const struct svga_velems_state *velems;
struct pipe_sampler_view *sampler_views[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS]; /* or texture ID's? */
struct svga_fragment_shader *fs;
struct svga_vertex_shader *vs;
struct svga_geometry_shader *user_gs; /* user-specified GS */
struct svga_geometry_shader *gs; /* derived GS */
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer ib;
/** Constant buffers for each shader.
* The size should probably always match with that of
* svga_shader_emitter_v10.num_shader_consts.
*/
struct pipe_constant_buffer constbufs[PIPE_SHADER_TYPES][SVGA_MAX_CONST_BUFS];
struct pipe_framebuffer_state framebuffer;
float depthscale;
/* Hack to limit the number of different render targets between
diff --git a/src/gallium/drivers/svga/svga_pipe_draw.c b/src/gallium/drivers/svga/svga_pipe_draw.c
index eaf4681..0eb54a5 100644
--- a/src/gallium/drivers/svga/svga_pipe_draw.c
+++ b/src/gallium/drivers/svga/svga_pipe_draw.c
@@ -164,79 +164,80 @@ done:
/**
* Determine if we need to implement primitive restart with a fallback
* path which breaks the original primitive into sub-primitive at the
* restart indexes.
*/
static boolean
need_fallback_prim_restart(const struct svga_context *svga,
const struct pipe_draw_info *info)
{
- if (info->primitive_restart && info->indexed) {
+ if (info->primitive_restart && info->index_size) {
if (!svga_have_vgpu10(svga))
return TRUE;
else if (!svga->state.sw.need_swtnl) {
- if (svga->curr.ib.index_size == 1)
+ if (info->index_size == 1)
return TRUE; /* no device support for 1-byte indexes */
- else if (svga->curr.ib.index_size == 2)
+ else if (info->index_size == 2)
return info->restart_index != 0xffff;
else
return info->restart_index != 0xffffffff;
}
}
return FALSE;
}
static void
svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
{
struct svga_context *svga = svga_context( pipe );
enum pipe_prim_type reduced_prim = u_reduced_prim( info->mode );
unsigned count = info->count;
enum pipe_error ret = 0;
boolean needed_swtnl;
+ struct pipe_resource *indexbuf =
+ info->has_user_indices ? NULL : info->index.resource;
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_DRAWVBO);
svga->hud.num_draw_calls++; /* for SVGA_QUERY_NUM_DRAW_CALLS */
if (u_reduced_prim(info->mode) == PIPE_PRIM_TRIANGLES &&
svga->curr.rast->templ.cull_face == PIPE_FACE_FRONT_AND_BACK)
goto done;
/* Upload a user index buffer. */
- struct pipe_index_buffer ibuffer_saved = {0};
- if (info->indexed && svga->curr.ib.user_buffer &&
- !util_save_and_upload_index_buffer(pipe, info, &svga->curr.ib,
- &ibuffer_saved)) {
- return;
+ unsigned index_offset = 0;
+ if (info->index_size && info->has_user_indices &&
+ !util_upload_index_buffer(pipe, info, &indexbuf, &index_offset)) {
+ goto done;
}
/*
* Mark currently bound target surfaces as dirty
* doesn't really matter if it is done before drawing.
*
* TODO If we ever normally return something other than
* true we should not mark it as dirty then.
*/
svga_mark_surfaces_dirty(svga_context(pipe));
if (svga->curr.reduced_prim != reduced_prim) {
svga->curr.reduced_prim = reduced_prim;
svga->dirty |= SVGA_NEW_REDUCED_PRIMITIVE;
}
if (need_fallback_prim_restart(svga, info)) {
enum pipe_error r;
- r = util_draw_vbo_without_prim_restart(pipe, &svga->curr.ib, info);
+ r = util_draw_vbo_without_prim_restart(pipe, info);
assert(r == PIPE_OK);
(void) r;
goto done;
}
if (!u_trim_pipe_prim( info->mode, &count ))
goto done;
needed_swtnl = svga->state.sw.need_swtnl;
@@ -251,32 +252,32 @@ svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
* referenced in the current command buffer as result of previous HW
* TNL. So flush now, to prevent the context to flush while a referred
* vertex buffer is mapped.
*/
svga_context_flush(svga, NULL);
}
/* Avoid leaking the previous hwtnl bias to swtnl */
svga_hwtnl_set_index_bias( svga->hwtnl, 0 );
- ret = svga_swtnl_draw_vbo( svga, info );
+ ret = svga_swtnl_draw_vbo( svga, info, indexbuf );
}
else {
- if (info->indexed && svga->curr.ib.buffer) {
+ if (info->index_size && indexbuf) {
unsigned offset;
- assert(svga->curr.ib.offset % svga->curr.ib.index_size == 0);
- offset = svga->curr.ib.offset / svga->curr.ib.index_size;
+ assert(index_offset % info->index_size == 0);
+ offset = index_offset / info->index_size;
ret = retry_draw_range_elements( svga,
- svga->curr.ib.buffer,
- svga->curr.ib.index_size,
+ indexbuf,
+ info->index_size,
info->index_bias,
info->min_index,
info->max_index,
info->mode,
info->start + offset,
count,
info->start_instance,
info->instance_count,
TRUE );
}
@@ -289,21 +290,20 @@ svga_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
/* XXX: Silence warnings, do something sensible here? */
(void)ret;
if (SVGA_DEBUG & DEBUG_FLUSH) {
svga_hwtnl_flush_retry( svga );
svga_context_flush(svga, NULL);
}
done:
- if (info->indexed && ibuffer_saved.user_buffer)
- pipe->set_index_buffer(pipe, &ibuffer_saved);
-
+ if (info->index_size && info->index.resource != indexbuf)
+ pipe_resource_reference(&indexbuf, NULL);
SVGA_STATS_TIME_POP(svga_sws(svga));
}
void svga_init_draw_functions( struct svga_context *svga )
{
svga->pipe.draw_vbo = svga_draw_vbo;
}
diff --git a/src/gallium/drivers/svga/svga_pipe_vertex.c b/src/gallium/drivers/svga/svga_pipe_vertex.c
index 5243ae6..9f67608 100644
--- a/src/gallium/drivers/svga/svga_pipe_vertex.c
+++ b/src/gallium/drivers/svga/svga_pipe_vertex.c
@@ -47,29 +47,20 @@ static void svga_set_vertex_buffers(struct pipe_context *pipe,
struct svga_context *svga = svga_context(pipe);
util_set_vertex_buffers_count(svga->curr.vb,
&svga->curr.num_vertex_buffers,
buffers, start_slot, count);
svga->dirty |= SVGA_NEW_VBUFFER;
}
-static void svga_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct svga_context *svga = svga_context(pipe);
-
- util_set_index_buffer(&svga->curr.ib, ib);
-}
-
-
/**
* Does the given vertex attrib format need range adjustment in the VS?
* Range adjustment scales and biases values from [0,1] to [-1,1].
* This lets us avoid the swtnl path.
*/
static boolean
attrib_needs_range_adjustment(enum pipe_format format)
{
switch (format) {
case PIPE_FORMAT_R8G8B8_SNORM:
@@ -333,15 +324,14 @@ void svga_cleanup_vertex_state( struct svga_context *svga )
pipe_resource_reference(&svga->state.hw_draw.ib, NULL);
for (i = 0; i < svga->state.hw_draw.num_vbuffers; i++)
pipe_resource_reference(&svga->state.hw_draw.vbuffers[i], NULL);
}
void svga_init_vertex_functions( struct svga_context *svga )
{
svga->pipe.set_vertex_buffers = svga_set_vertex_buffers;
- svga->pipe.set_index_buffer = svga_set_index_buffer;
svga->pipe.create_vertex_elements_state = svga_create_vertex_elements_state;
svga->pipe.bind_vertex_elements_state = svga_bind_vertex_elements_state;
svga->pipe.delete_vertex_elements_state = svga_delete_vertex_elements_state;
}
diff --git a/src/gallium/drivers/svga/svga_swtnl.h b/src/gallium/drivers/svga/svga_swtnl.h
index fc094e5..1c66ab6 100644
--- a/src/gallium/drivers/svga/svga_swtnl.h
+++ b/src/gallium/drivers/svga/svga_swtnl.h
@@ -32,14 +32,15 @@ struct svga_context;
struct pipe_context;
struct vbuf_render;
boolean svga_init_swtnl( struct svga_context *svga );
void svga_destroy_swtnl( struct svga_context *svga );
enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga,
- const struct pipe_draw_info *info);
+ const struct pipe_draw_info *info,
+ struct pipe_resource *indexbuf);
#endif
diff --git a/src/gallium/drivers/svga/svga_swtnl_draw.c b/src/gallium/drivers/svga/svga_swtnl_draw.c
index 76d128d..1898fa2 100644
--- a/src/gallium/drivers/svga/svga_swtnl_draw.c
+++ b/src/gallium/drivers/svga/svga_swtnl_draw.c
@@ -31,21 +31,22 @@
#include "svga_context.h"
#include "svga_screen.h"
#include "svga_swtnl.h"
#include "svga_state.h"
#include "svga_swtnl_private.h"
enum pipe_error
svga_swtnl_draw_vbo(struct svga_context *svga,
- const struct pipe_draw_info *info)
+ const struct pipe_draw_info *info,
+ struct pipe_resource *indexbuf)
{
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = { 0 };
struct pipe_transfer *ib_transfer = NULL;
struct pipe_transfer *cb_transfer[SVGA_MAX_CONST_BUFS] = { 0 };
struct draw_context *draw = svga->swtnl.draw;
MAYBE_UNUSED unsigned old_num_vertex_buffers;
unsigned i;
const void *map;
enum pipe_error ret;
@@ -76,27 +77,27 @@ svga_swtnl_draw_vbo(struct svga_context *svga,
PIPE_TRANSFER_READ,
&vb_transfer[i]);
draw_set_mapped_vertex_buffer(draw, i, map, ~0);
}
}
old_num_vertex_buffers = svga->curr.num_vertex_buffers;
/* Map index buffer, if present */
map = NULL;
- if (info->indexed && svga->curr.ib.buffer) {
- map = pipe_buffer_map(&svga->pipe, svga->curr.ib.buffer,
+ if (info->index_size && indexbuf) {
+ map = pipe_buffer_map(&svga->pipe, indexbuf,
PIPE_TRANSFER_READ,
&ib_transfer);
draw_set_indexes(draw,
- (const ubyte *) map + svga->curr.ib.offset,
- svga->curr.ib.index_size, ~0);
+ (const ubyte *) map,
+ info->index_size, ~0);
}
/* Map constant buffers */
for (i = 0; i < ARRAY_SIZE(svga->curr.constbufs[PIPE_SHADER_VERTEX]); ++i) {
if (svga->curr.constbufs[PIPE_SHADER_VERTEX][i].buffer == NULL) {
continue;
}
map = pipe_buffer_map(&svga->pipe,
svga->curr.constbufs[PIPE_SHADER_VERTEX][i].buffer,
diff --git a/src/gallium/drivers/swr/swr_context.h b/src/gallium/drivers/swr/swr_context.h
index 4de20c1..93e6f05 100644
--- a/src/gallium/drivers/swr/swr_context.h
+++ b/src/gallium/drivers/swr/swr_context.h
@@ -130,21 +130,20 @@ struct swr_context {
constants[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
struct pipe_framebuffer_state framebuffer;
struct swr_poly_stipple poly_stipple;
struct pipe_scissor_state scissor;
SWR_RECT swr_scissor;
struct pipe_sampler_view *
sampler_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
struct pipe_viewport_state viewport;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer index_buffer;
struct blitter_context *blitter;
/** Conditional query object and mode */
struct pipe_query *render_cond_query;
enum pipe_render_cond_flag render_cond_mode;
boolean render_cond_cond;
unsigned active_queries;
unsigned num_vertex_buffers;
diff --git a/src/gallium/drivers/swr/swr_draw.cpp b/src/gallium/drivers/swr/swr_draw.cpp
index de78cf3..03c82a7 100644
--- a/src/gallium/drivers/swr/swr_draw.cpp
+++ b/src/gallium/drivers/swr/swr_draw.cpp
@@ -160,21 +160,21 @@ swr_draw_vbo(struct pipe_context *pipe, const struct pipe_draw_info *info)
case PIPE_PRIM_LINE_STRIP:
feState.topologyProvokingVertex = feState.provokingVertex.lineStripList;
break;
default:
feState.topologyProvokingVertex = 0;
}
feState.bEnableCutIndex = info->primitive_restart;
SwrSetFrontendState(ctx->swrContext, &feState);
- if (info->indexed)
+ if (info->index_size)
SwrDrawIndexedInstanced(ctx->swrContext,
swr_convert_prim_topology(info->mode),
info->count,
info->instance_count,
info->start,
info->index_bias,
info->start_instance);
else
SwrDrawInstanced(ctx->swrContext,
swr_convert_prim_topology(info->mode),
diff --git a/src/gallium/drivers/swr/swr_state.cpp b/src/gallium/drivers/swr/swr_state.cpp
index e224f20..8750bda 100644
--- a/src/gallium/drivers/swr/swr_state.cpp
+++ b/src/gallium/drivers/swr/swr_state.cpp
@@ -584,34 +584,20 @@ swr_set_vertex_buffers(struct pipe_context *pipe,
&ctx->num_vertex_buffers,
buffers,
start_slot,
num_elements);
ctx->dirty |= SWR_NEW_VERTEX;
}
static void
-swr_set_index_buffer(struct pipe_context *pipe,
- const struct pipe_index_buffer *ib)
-{
- struct swr_context *ctx = swr_context(pipe);
-
- if (ib)
- memcpy(&ctx->index_buffer, ib, sizeof(ctx->index_buffer));
- else
- memset(&ctx->index_buffer, 0, sizeof(ctx->index_buffer));
-
- ctx->dirty |= SWR_NEW_VERTEX;
-}
-
-static void
swr_set_polygon_stipple(struct pipe_context *pipe,
const struct pipe_poly_stipple *stipple)
{
struct swr_context *ctx = swr_context(pipe);
ctx->poly_stipple.pipe = *stipple; /* struct copy */
ctx->dirty |= SWR_NEW_STIPPLE;
}
static void
@@ -742,24 +728,23 @@ swr_update_resource_status(struct pipe_context *pipe,
swr_resource_write(fb->zsbuf->texture);
/* VBO vertex buffers */
for (uint32_t i = 0; i < ctx->num_vertex_buffers; i++) {
struct pipe_vertex_buffer *vb = &ctx->vertex_buffer[i];
if (!vb->is_user_buffer)
swr_resource_read(vb->buffer.resource);
}
/* VBO index buffer */
- if (p_draw_info && p_draw_info->indexed) {
- struct pipe_index_buffer *ib = &ctx->index_buffer;
- if (!ib->user_buffer)
- swr_resource_read(ib->buffer);
+ if (p_draw_info && p_draw_info->index_size) {
+ if (!p_draw_info->has_user_indices)
+ swr_resource_read(p_draw_info->index.resource);
}
/* transform feedback buffers */
for (uint32_t i = 0; i < ctx->num_so_targets; i++) {
struct pipe_stream_output_target *target = ctx->so_targets[i];
if (target && target->buffer)
swr_resource_write(target->buffer);
}
/* texture sampler views */
@@ -1211,21 +1196,24 @@ swr_update_derived(struct pipe_context *pipe,
vp->y = 0.0f;
}
vp->width = std::min(vp->width, (float)fb->width - vp->x);
vp->height = std::min(vp->height, (float)fb->height - vp->y);
SwrSetViewports(ctx->swrContext, 1, vp, vpm);
}
/* Set vertex & index buffers */
/* (using draw info if called by swr_draw_vbo) */
- if (ctx->dirty & SWR_NEW_VERTEX) {
+ /* TODO: This is always true, because the index buffer comes from
+ * pipe_draw_info.
+ */
+ if (1 || ctx->dirty & SWR_NEW_VERTEX) {
uint32_t scratch_total;
uint8_t *scratch = NULL;
/* If being called by swr_draw_vbo, copy draw details */
struct pipe_draw_info info = {0};
if (p_draw_info)
info = *p_draw_info;
/* We must get all the scratch space in one go */
scratch_total = 0;
@@ -1292,52 +1280,51 @@ swr_update_derived(struct pipe_context *pipe,
swrVertexBuffers[i].minVertex = min_vertex_index;
swrVertexBuffers[i].maxVertex = elems;
swrVertexBuffers[i].partialInboundsSize = partial_inbounds;
}
SwrSetVertexBuffers(
ctx->swrContext, ctx->num_vertex_buffers, swrVertexBuffers);
/* index buffer, if required (info passed in by swr_draw_vbo) */
SWR_FORMAT index_type = R32_UINT; /* Default for non-indexed draws */
- if (info.indexed) {
+ if (info.index_size) {
const uint8_t *p_data;
uint32_t size, pitch;
- struct pipe_index_buffer *ib = &ctx->index_buffer;
- pitch = ib->index_size ? ib->index_size : sizeof(uint32_t);
+ pitch = p_draw_info->index_size ? p_draw_info->index_size : sizeof(uint32_t);
index_type = swr_convert_index_type(pitch);
- if (!ib->user_buffer) {
+ if (!info.has_user_indices) {
/* VBO
* size is based on buffer->width0 rather than info.count
* to prevent having to validate VBO on each draw */
- size = ib->buffer->width0;
- p_data = swr_resource_data(ib->buffer) + ib->offset;
+ size = info.index.resource->width0;
+ p_data = swr_resource_data(info.index.resource);
} else {
/* Client buffer
* client memory is one-time use, re-trigger SWR_NEW_VERTEX to
* revalidate on each draw */
post_update_dirty_flags |= SWR_NEW_VERTEX;
size = info.count * pitch;
size = AlignUp(size, 4);
/* Copy indices to scratch space */
- const void *ptr = ib->user_buffer;
+ const void *ptr = info.index.user;
ptr = swr_copy_to_scratch_space(
ctx, &ctx->scratch->index_buffer, ptr, size);
p_data = (const uint8_t *)ptr;
}
SWR_INDEX_BUFFER_STATE swrIndexBuffer;
- swrIndexBuffer.format = swr_convert_index_type(ib->index_size);
+ swrIndexBuffer.format = swr_convert_index_type(p_draw_info->index_size);
swrIndexBuffer.pIndices = p_data;
swrIndexBuffer.size = size;
SwrSetIndexBuffer(ctx->swrContext, &swrIndexBuffer);
}
struct swr_vertex_element_state *velems = ctx->velems;
if (velems && velems->fsState.indexType != index_type) {
velems->fsFunc = NULL;
velems->fsState.indexType = index_type;
@@ -1839,21 +1826,20 @@ swr_state_init(struct pipe_context *pipe)
pipe->bind_gs_state = swr_bind_gs_state;
pipe->delete_gs_state = swr_delete_gs_state;
pipe->set_constant_buffer = swr_set_constant_buffer;
pipe->create_vertex_elements_state = swr_create_vertex_elements_state;
pipe->bind_vertex_elements_state = swr_bind_vertex_elements_state;
pipe->delete_vertex_elements_state = swr_delete_vertex_elements_state;
pipe->set_vertex_buffers = swr_set_vertex_buffers;
- pipe->set_index_buffer = swr_set_index_buffer;
pipe->set_polygon_stipple = swr_set_polygon_stipple;
pipe->set_clip_state = swr_set_clip_state;
pipe->set_scissor_states = swr_set_scissor_states;
pipe->set_viewport_states = swr_set_viewport_states;
pipe->set_framebuffer_state = swr_set_framebuffer_state;
pipe->set_blend_color = swr_set_blend_color;
pipe->set_stencil_ref = swr_set_stencil_ref;
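The swr hunks above show the pattern every driver now follows: with set_index_buffer gone, index data is taken from pipe_draw_info at draw time. Purely as an illustration (this sketch is not part of the diff, and "map_indices" is a made-up helper name), the lookup boils down to the following; the resource case uses the generic pipe_buffer_map helper, whereas a real driver would use whatever mapping path it already has:

    /* Illustrative sketch only: resolve a CPU-visible pointer to the indices
     * of a draw, now that they travel in pipe_draw_info. */
    #include "pipe/p_context.h"
    #include "pipe/p_defines.h"
    #include "pipe/p_state.h"
    #include "util/u_inlines.h"

    static const void *
    map_indices(struct pipe_context *pipe, const struct pipe_draw_info *info,
                struct pipe_transfer **transfer)
    {
       *transfer = NULL;

       if (!info->index_size)
          return NULL;                      /* index_size == 0: not indexed */

       if (info->has_user_indices)
          return info->index.user;          /* client memory, nothing to map */

       /* Real buffer object: the first index used by a direct draw is
        * info->start, i.e. byte offset info->start * info->index_size. */
       return pipe_buffer_map(pipe, info->index.resource,
                              PIPE_TRANSFER_READ, transfer);
    }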
diff --git a/src/gallium/drivers/trace/tr_context.c b/src/gallium/drivers/trace/tr_context.c
index eafee7f..c5563a4 100644
--- a/src/gallium/drivers/trace/tr_context.c
+++ b/src/gallium/drivers/trace/tr_context.c
@@ -1027,38 +1027,20 @@ trace_context_set_vertex_buffers(struct pipe_context *_pipe,
trace_dump_arg_begin("buffers");
trace_dump_struct_array(vertex_buffer, buffers, num_buffers);
trace_dump_arg_end();
pipe->set_vertex_buffers(pipe, start_slot, num_buffers, buffers);
trace_dump_call_end();
}
-static void
-trace_context_set_index_buffer(struct pipe_context *_pipe,
- const struct pipe_index_buffer *ib)
-{
- struct trace_context *tr_ctx = trace_context(_pipe);
- struct pipe_context *pipe = tr_ctx->pipe;
-
- trace_dump_call_begin("pipe_context", "set_index_buffer");
-
- trace_dump_arg(ptr, pipe);
- trace_dump_arg(index_buffer, ib);
-
- pipe->set_index_buffer(pipe, ib);
-
- trace_dump_call_end();
-}
-
-
static struct pipe_stream_output_target *
trace_context_create_stream_output_target(struct pipe_context *_pipe,
struct pipe_resource *res,
unsigned buffer_offset,
unsigned buffer_size)
{
struct trace_context *tr_ctx = trace_context(_pipe);
struct pipe_context *pipe = tr_ctx->pipe;
struct pipe_stream_output_target *result;
@@ -1797,21 +1779,20 @@ trace_context_create(struct trace_screen *tr_scr,
TR_CTX_INIT(set_framebuffer_state);
TR_CTX_INIT(set_polygon_stipple);
TR_CTX_INIT(set_scissor_states);
TR_CTX_INIT(set_viewport_states);
TR_CTX_INIT(set_sampler_views);
TR_CTX_INIT(create_sampler_view);
TR_CTX_INIT(sampler_view_destroy);
TR_CTX_INIT(create_surface);
TR_CTX_INIT(surface_destroy);
TR_CTX_INIT(set_vertex_buffers);
- TR_CTX_INIT(set_index_buffer);
TR_CTX_INIT(create_stream_output_target);
TR_CTX_INIT(stream_output_target_destroy);
TR_CTX_INIT(set_stream_output_targets);
TR_CTX_INIT(resource_copy_region);
TR_CTX_INIT(blit);
TR_CTX_INIT(flush_resource);
TR_CTX_INIT(clear);
TR_CTX_INIT(clear_render_target);
TR_CTX_INIT(clear_depth_stencil);
TR_CTX_INIT(clear_texture);
diff --git a/src/gallium/drivers/trace/tr_dump_state.c b/src/gallium/drivers/trace/tr_dump_state.c
index e4a5e3b..41f7faf 100644
--- a/src/gallium/drivers/trace/tr_dump_state.c
+++ b/src/gallium/drivers/trace/tr_dump_state.c
@@ -654,41 +654,20 @@ void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state)
trace_dump_member(uint, state, stride);
trace_dump_member(bool, state, is_user_buffer);
trace_dump_member(uint, state, buffer_offset);
trace_dump_member(ptr, state, buffer.resource);
trace_dump_struct_end();
}
-void trace_dump_index_buffer(const struct pipe_index_buffer *state)
-{
- if (!trace_dumping_enabled_locked())
- return;
-
- if (!state) {
- trace_dump_null();
- return;
- }
-
- trace_dump_struct_begin("pipe_index_buffer");
-
- trace_dump_member(uint, state, index_size);
- trace_dump_member(uint, state, offset);
- trace_dump_member(ptr, state, buffer);
- trace_dump_member(ptr, state, user_buffer);
-
- trace_dump_struct_end();
-}
-
-
void trace_dump_vertex_element(const struct pipe_vertex_element *state)
{
if (!trace_dumping_enabled_locked())
return;
if (!state) {
trace_dump_null();
return;
}
@@ -785,38 +764,40 @@ void trace_dump_draw_info(const struct pipe_draw_info *state)
if (!trace_dumping_enabled_locked())
return;
if (!state) {
trace_dump_null();
return;
}
trace_dump_struct_begin("pipe_draw_info");
- trace_dump_member(bool, state, indexed);
+ trace_dump_member(uint, state, index_size);
+ trace_dump_member(uint, state, has_user_indices);
trace_dump_member(uint, state, mode);
trace_dump_member(uint, state, start);
trace_dump_member(uint, state, count);
trace_dump_member(uint, state, start_instance);
trace_dump_member(uint, state, instance_count);
trace_dump_member(uint, state, vertices_per_patch);
trace_dump_member(int, state, index_bias);
trace_dump_member(uint, state, min_index);
trace_dump_member(uint, state, max_index);
trace_dump_member(bool, state, primitive_restart);
trace_dump_member(uint, state, restart_index);
+ trace_dump_member(ptr, state, index.resource);
trace_dump_member(ptr, state, count_from_stream_output);
if (!state->indirect) {
trace_dump_member(ptr, state, indirect);
} else {
trace_dump_member(uint, state, indirect->offset);
trace_dump_member(uint, state, indirect->stride);
trace_dump_member(uint, state, indirect->draw_count);
trace_dump_member(uint, state, indirect->indirect_draw_count_offset);
trace_dump_member(ptr, state, indirect->buffer);
diff --git a/src/gallium/drivers/trace/tr_dump_state.h b/src/gallium/drivers/trace/tr_dump_state.h
index fd2bc50..baff025 100644
--- a/src/gallium/drivers/trace/tr_dump_state.h
+++ b/src/gallium/drivers/trace/tr_dump_state.h
@@ -67,22 +67,20 @@ void trace_dump_sampler_state(const struct pipe_sampler_state *state);
void trace_dump_sampler_view_template(const struct pipe_sampler_view *view,
enum pipe_texture_target target);
void trace_dump_surface_template(const struct pipe_surface *state,
enum pipe_texture_target target);
void trace_dump_transfer(const struct pipe_transfer *state);
void trace_dump_vertex_buffer(const struct pipe_vertex_buffer *state);
-void trace_dump_index_buffer(const struct pipe_index_buffer *state);
-
void trace_dump_vertex_element(const struct pipe_vertex_element *state);
void trace_dump_constant_buffer(const struct pipe_constant_buffer *state);
void trace_dump_shader_buffer(const struct pipe_shader_buffer *buffer);
void trace_dump_draw_info(const struct pipe_draw_info *state);
void trace_dump_blit_info(const struct pipe_blit_info *);
diff --git a/src/gallium/drivers/vc4/vc4_context.h b/src/gallium/drivers/vc4/vc4_context.h
index 6bd2424..3fe4395 100644
--- a/src/gallium/drivers/vc4/vc4_context.h
+++ b/src/gallium/drivers/vc4/vc4_context.h
@@ -60,21 +60,21 @@
#define VC4_DIRTY_BLEND_COLOR (1 << 7)
#define VC4_DIRTY_STENCIL_REF (1 << 8)
#define VC4_DIRTY_SAMPLE_MASK (1 << 9)
#define VC4_DIRTY_FRAMEBUFFER (1 << 10)
#define VC4_DIRTY_STIPPLE (1 << 11)
#define VC4_DIRTY_VIEWPORT (1 << 12)
#define VC4_DIRTY_CONSTBUF (1 << 13)
#define VC4_DIRTY_VTXSTATE (1 << 14)
#define VC4_DIRTY_VTXBUF (1 << 15)
-#define VC4_DIRTY_INDEXBUF (1 << 16)
+
#define VC4_DIRTY_SCISSOR (1 << 17)
#define VC4_DIRTY_FLAT_SHADE_FLAGS (1 << 18)
#define VC4_DIRTY_PRIM_MODE (1 << 19)
#define VC4_DIRTY_CLIP (1 << 20)
#define VC4_DIRTY_UNCOMPILED_VS (1 << 21)
#define VC4_DIRTY_UNCOMPILED_FS (1 << 22)
#define VC4_DIRTY_COMPILED_CS (1 << 23)
#define VC4_DIRTY_COMPILED_VS (1 << 24)
#define VC4_DIRTY_COMPILED_FS (1 << 25)
#define VC4_DIRTY_FS_INPUTS (1 << 26)
@@ -370,21 +370,20 @@ struct vc4_context {
uint8_t ub[4];
} blend_color;
struct pipe_stencil_ref stencil_ref;
unsigned sample_mask;
struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple stipple;
struct pipe_clip_state clip;
struct pipe_viewport_state viewport;
struct vc4_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct vc4_vertexbuf_stateobj vertexbuf;
- struct pipe_index_buffer indexbuf;
/** @} */
};
struct vc4_rasterizer_state {
struct pipe_rasterizer_state base;
/* VC4_CONFIGURATION_BITS */
uint8_t config_bits[3];
float point_size;
diff --git a/src/gallium/drivers/vc4/vc4_draw.c b/src/gallium/drivers/vc4/vc4_draw.c
index 01fae27..0aee73e 100644
--- a/src/gallium/drivers/vc4/vc4_draw.c
+++ b/src/gallium/drivers/vc4/vc4_draw.c
@@ -282,21 +282,20 @@ static void
vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
struct vc4_context *vc4 = vc4_context(pctx);
if (!info->count_from_stream_output && !info->indirect &&
!info->primitive_restart &&
!u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
return;
if (info->mode >= PIPE_PRIM_QUADS) {
- util_primconvert_save_index_buffer(vc4->primconvert, &vc4->indexbuf);
util_primconvert_save_rasterizer_state(vc4->primconvert, &vc4->rasterizer->base);
util_primconvert_draw_vbo(vc4->primconvert, info);
perf_debug("Fallback conversion for %d %s vertices\n",
info->count, u_prim_name(info->mode));
return;
}
/* Before setting up the draw, do any fixup blits necessary. */
vc4_predraw_check_textures(pctx, &vc4->verttex);
vc4_predraw_check_textures(pctx, &vc4->fragtex);
@@ -333,54 +332,55 @@ vc4_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
vc4->last_index_bias != info->index_bias) {
vc4_emit_gl_shader_state(vc4, info, 0);
}
vc4->dirty = 0;
/* Note that the primitive type fields match with OpenGL/gallium
* definitions, up to but not including QUADS.
*/
struct vc4_cl_out *bcl = cl_start(&job->bcl);
- if (info->indexed) {
- uint32_t offset = vc4->indexbuf.offset;
- uint32_t index_size = vc4->indexbuf.index_size;
+ if (info->index_size) {
+ uint32_t index_size = info->index_size;
+ uint32_t offset = info->start * index_size;
struct pipe_resource *prsc;
- if (vc4->indexbuf.index_size == 4) {
- prsc = vc4_get_shadow_index_buffer(pctx, &vc4->indexbuf,
+ if (info->index_size == 4) {
+ prsc = vc4_get_shadow_index_buffer(pctx, info,
+ offset,
info->count, &offset);
index_size = 2;
} else {
- if (vc4->indexbuf.user_buffer) {
+ if (info->has_user_indices) {
prsc = NULL;
u_upload_data(vc4->uploader, 0,
info->count * index_size, 4,
- vc4->indexbuf.user_buffer,
+ info->index.user,
&offset, &prsc);
} else {
- prsc = vc4->indexbuf.buffer;
+ prsc = info->index.resource;
}
}
struct vc4_resource *rsc = vc4_resource(prsc);
cl_start_reloc(&job->bcl, &bcl, 1);
cl_u8(&bcl, VC4_PACKET_GL_INDEXED_PRIMITIVE);
cl_u8(&bcl,
info->mode |
(index_size == 2 ?
VC4_INDEX_BUFFER_U16:
VC4_INDEX_BUFFER_U8));
cl_u32(&bcl, info->count);
cl_reloc(job, &job->bcl, &bcl, rsc->bo, offset);
cl_u32(&bcl, vc4->max_index);
job->draw_calls_queued++;
- if (vc4->indexbuf.index_size == 4 || vc4->indexbuf.user_buffer)
+ if (info->index_size == 4 || info->has_user_indices)
pipe_resource_reference(&prsc, NULL);
} else {
uint32_t count = info->count;
uint32_t start = info->start;
uint32_t extra_index_bias = 0;
while (count) {
uint32_t this_count = count;
uint32_t step = count;
static const uint32_t max_verts = 65535;
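As the vc4_draw_vbo hunk above shows, the byte offset that a bound pipe_index_buffer used to carry is now expressed in index units in pipe_draw_info::start, so the driver multiplies it back out. A minimal sketch with made-up numbers (not part of the diff):

    #include "pipe/p_state.h"

    /* Illustrative only.  Example: GL_UNSIGNED_SHORT indices bound at byte
     * offset 64 arrive here as index_size = 2 and start = 32, so this
     * returns 64 again -- the value the hardware packet wants. */
    static uint32_t
    index_byte_offset(const struct pipe_draw_info *info)
    {
       return info->start * info->index_size;
    }

For user-space indices there is no binding offset; vc4 simply uploads info->count * index_size bytes with u_upload_data and uses the offset the uploader returns instead.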
diff --git a/src/gallium/drivers/vc4/vc4_resource.c b/src/gallium/drivers/vc4/vc4_resource.c
index e4784ff..5aeb65e 100644
--- a/src/gallium/drivers/vc4/vc4_resource.c
+++ b/src/gallium/drivers/vc4/vc4_resource.c
@@ -972,41 +972,42 @@ vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
* Since GLES2 only has support for 1 and 2-byte indices, the hardware doesn't
* include 4-byte index support, and we have to shrink it down.
*
* There's no fallback support for when indices end up being larger than 2^16,
* though it will at least fail an assertion. Also, if the original index data
* was in user memory, it would be nice to not have uploaded it to a VBO
* before translating.
*/
struct pipe_resource *
vc4_get_shadow_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
+ uint32_t offset,
uint32_t count,
uint32_t *shadow_offset)
{
struct vc4_context *vc4 = vc4_context(pctx);
- struct vc4_resource *orig = vc4_resource(ib->buffer);
+ struct vc4_resource *orig = vc4_resource(info->index.resource);
perf_debug("Fallback conversion for %d uint indices\n", count);
void *data;
struct pipe_resource *shadow_rsc = NULL;
u_upload_alloc(vc4->uploader, 0, count * 2, 4,
shadow_offset, &shadow_rsc, &data);
uint16_t *dst = data;
struct pipe_transfer *src_transfer = NULL;
const uint32_t *src;
- if (ib->user_buffer) {
- src = ib->user_buffer;
+ if (info->has_user_indices) {
+ src = info->index.user;
} else {
src = pipe_buffer_map_range(pctx, &orig->base.b,
- ib->offset,
+ offset,
count * 4,
PIPE_TRANSFER_READ, &src_transfer);
}
for (int i = 0; i < count; i++) {
uint32_t src_index = src[i];
assert(src_index <= 0xffff);
dst[i] = src_index;
}
diff --git a/src/gallium/drivers/vc4/vc4_resource.h b/src/gallium/drivers/vc4/vc4_resource.h
index 27aa4e8..72af1d5 100644
--- a/src/gallium/drivers/vc4/vc4_resource.h
+++ b/src/gallium/drivers/vc4/vc4_resource.h
@@ -114,16 +114,17 @@ vc4_transfer(struct pipe_transfer *ptrans)
return (struct vc4_transfer *)ptrans;
}
void vc4_resource_screen_init(struct pipe_screen *pscreen);
void vc4_resource_context_init(struct pipe_context *pctx);
struct pipe_resource *vc4_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl);
void vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
struct pipe_sampler_view *view);
struct pipe_resource *vc4_get_shadow_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib,
+ const struct pipe_draw_info *info,
+ uint32_t offset,
uint32_t count,
- uint32_t *offset);
+ uint32_t *shadow_offset);
void vc4_dump_surface(struct pipe_surface *psurf);
#endif /* VC4_RESOURCE_H */
diff --git a/src/gallium/drivers/vc4/vc4_state.c b/src/gallium/drivers/vc4/vc4_state.c
index 2e00104..fe33946 100644
--- a/src/gallium/drivers/vc4/vc4_state.c
+++ b/src/gallium/drivers/vc4/vc4_state.c
@@ -295,38 +295,20 @@ vc4_set_vertex_buffers(struct pipe_context *pctx,
struct vc4_vertexbuf_stateobj *so = &vc4->vertexbuf;
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
start_slot, count);
so->count = util_last_bit(so->enabled_mask);
vc4->dirty |= VC4_DIRTY_VTXBUF;
}
static void
-vc4_set_index_buffer(struct pipe_context *pctx,
- const struct pipe_index_buffer *ib)
-{
- struct vc4_context *vc4 = vc4_context(pctx);
-
- if (ib) {
- pipe_resource_reference(&vc4->indexbuf.buffer, ib->buffer);
- vc4->indexbuf.index_size = ib->index_size;
- vc4->indexbuf.offset = ib->offset;
- vc4->indexbuf.user_buffer = ib->user_buffer;
- } else {
- pipe_resource_reference(&vc4->indexbuf.buffer, NULL);
- }
-
- vc4->dirty |= VC4_DIRTY_INDEXBUF;
-}
-
-static void
vc4_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct vc4_context *vc4 = vc4_context(pctx);
vc4->blend = hwcso;
vc4->dirty |= VC4_DIRTY_BLEND;
}
static void
vc4_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
@@ -663,21 +645,20 @@ vc4_state_init(struct pipe_context *pctx)
pctx->set_stencil_ref = vc4_set_stencil_ref;
pctx->set_clip_state = vc4_set_clip_state;
pctx->set_sample_mask = vc4_set_sample_mask;
pctx->set_constant_buffer = vc4_set_constant_buffer;
pctx->set_framebuffer_state = vc4_set_framebuffer_state;
pctx->set_polygon_stipple = vc4_set_polygon_stipple;
pctx->set_scissor_states = vc4_set_scissor_states;
pctx->set_viewport_states = vc4_set_viewport_states;
pctx->set_vertex_buffers = vc4_set_vertex_buffers;
- pctx->set_index_buffer = vc4_set_index_buffer;
pctx->create_blend_state = vc4_create_blend_state;
pctx->bind_blend_state = vc4_blend_state_bind;
pctx->delete_blend_state = vc4_generic_cso_state_delete;
pctx->create_rasterizer_state = vc4_create_rasterizer_state;
pctx->bind_rasterizer_state = vc4_rasterizer_state_bind;
pctx->delete_rasterizer_state = vc4_generic_cso_state_delete;
pctx->create_depth_stencil_alpha_state = vc4_create_depth_stencil_alpha_state;
diff --git a/src/gallium/drivers/virgl/virgl_context.c b/src/gallium/drivers/virgl/virgl_context.c
index 8d7094f..232d295 100644
--- a/src/gallium/drivers/virgl/virgl_context.c
+++ b/src/gallium/drivers/virgl/virgl_context.c
@@ -123,26 +123,27 @@ static void virgl_attach_res_vertex_buffers(struct virgl_context *vctx)
struct virgl_resource *res;
unsigned i;
for (i = 0; i < vctx->num_vertex_buffers; i++) {
res = virgl_resource(vctx->vertex_buffer[i].buffer.resource);
if (res)
vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
}
-static void virgl_attach_res_index_buffer(struct virgl_context *vctx)
+static void virgl_attach_res_index_buffer(struct virgl_context *vctx,
+ struct virgl_indexbuf *ib)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
struct virgl_resource *res;
- res = virgl_resource(vctx->index_buffer.buffer);
+ res = virgl_resource(ib->buffer);
if (res)
vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
}
static void virgl_attach_res_so_targets(struct virgl_context *vctx)
{
struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
struct virgl_resource *res;
unsigned i;
@@ -176,21 +177,20 @@ static void virgl_reemit_res(struct virgl_context *vctx)
enum pipe_shader_type shader_type;
/* reattach any flushed resources */
/* framebuffer, sampler views, vertex/index/uniform/stream buffers */
virgl_attach_res_framebuffer(vctx);
for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
virgl_attach_res_sampler_views(vctx, shader_type);
virgl_attach_res_uniform_buffers(vctx, shader_type);
}
- virgl_attach_res_index_buffer(vctx);
virgl_attach_res_vertex_buffers(vctx);
virgl_attach_res_so_targets(vctx);
}
static struct pipe_surface *virgl_create_surface(struct pipe_context *ctx,
struct pipe_resource *resource,
const struct pipe_surface *templ)
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_surface *surf;
@@ -397,39 +397,26 @@ static void virgl_set_stencil_ref(struct pipe_context *ctx,
virgl_encoder_set_stencil_ref(vctx, ref);
}
static void virgl_set_blend_color(struct pipe_context *ctx,
const struct pipe_blend_color *color)
{
struct virgl_context *vctx = virgl_context(ctx);
virgl_encoder_set_blend_color(vctx, color);
}
-static void virgl_set_index_buffer(struct pipe_context *ctx,
- const struct pipe_index_buffer *ib)
-{
- struct virgl_context *vctx = virgl_context(ctx);
-
- if (ib) {
- pipe_resource_reference(&vctx->index_buffer.buffer, ib->buffer);
- memcpy(&vctx->index_buffer, ib, sizeof(*ib));
- } else {
- pipe_resource_reference(&vctx->index_buffer.buffer, NULL);
- }
-}
-
static void virgl_hw_set_index_buffer(struct pipe_context *ctx,
- struct pipe_index_buffer *ib)
+ struct virgl_indexbuf *ib)
{
struct virgl_context *vctx = virgl_context(ctx);
virgl_encoder_set_index_buffer(vctx, ib);
- virgl_attach_res_index_buffer(vctx);
+ virgl_attach_res_index_buffer(vctx, ib);
}
static void virgl_set_constant_buffer(struct pipe_context *ctx,
enum pipe_shader_type shader, uint index,
const struct pipe_constant_buffer *buf)
{
struct virgl_context *vctx = virgl_context(ctx);
if (buf) {
if (!buf->user_buffer){
@@ -583,51 +570,50 @@ static void virgl_clear(struct pipe_context *ctx,
struct virgl_context *vctx = virgl_context(ctx);
virgl_encode_clear(vctx, buffers, color, depth, stencil);
}
static void virgl_draw_vbo(struct pipe_context *ctx,
const struct pipe_draw_info *dinfo)
{
struct virgl_context *vctx = virgl_context(ctx);
struct virgl_screen *rs = virgl_screen(ctx->screen);
- struct pipe_index_buffer ib = {};
+ struct virgl_indexbuf ib = {};
struct pipe_draw_info info = *dinfo;
if (!dinfo->count_from_stream_output && !dinfo->indirect &&
!dinfo->primitive_restart &&
!u_trim_pipe_prim(dinfo->mode, (unsigned*)&dinfo->count))
return;
if (!(rs->caps.caps.v1.prim_mask & (1 << dinfo->mode))) {
- util_primconvert_save_index_buffer(vctx->primconvert, &vctx->index_buffer);
util_primconvert_draw_vbo(vctx->primconvert, dinfo);
return;
}
- if (info.indexed) {
- pipe_resource_reference(&ib.buffer, vctx->index_buffer.buffer);
- ib.user_buffer = vctx->index_buffer.user_buffer;
- ib.index_size = vctx->index_buffer.index_size;
- ib.offset = vctx->index_buffer.offset + info.start * ib.index_size;
+ if (info.index_size) {
+ pipe_resource_reference(&ib.buffer, info.has_user_indices ? NULL : info.index.resource);
+ ib.user_buffer = info.has_user_indices ? info.index.user : NULL;
+ ib.index_size = dinfo->index_size;
+ ib.offset = info.start * ib.index_size;
if (ib.user_buffer) {
u_upload_data(vctx->uploader, 0, info.count * ib.index_size, 256,
ib.user_buffer, &ib.offset, &ib.buffer);
ib.user_buffer = NULL;
}
}
u_upload_unmap(vctx->uploader);
vctx->num_draws++;
virgl_hw_set_vertex_buffers(ctx);
- if (info.indexed)
+ if (info.index_size)
virgl_hw_set_index_buffer(ctx, &ib);
virgl_encoder_draw_vbo(vctx, &info);
pipe_resource_reference(&ib.buffer, NULL);
}
static void virgl_flush_eq(struct virgl_context *ctx, void *closure)
{
@@ -898,21 +884,20 @@ struct pipe_context *virgl_context_create(struct pipe_screen *pscreen,
vctx->base.delete_depth_stencil_alpha_state = virgl_delete_depth_stencil_alpha_state;
vctx->base.create_rasterizer_state = virgl_create_rasterizer_state;
vctx->base.bind_rasterizer_state = virgl_bind_rasterizer_state;
vctx->base.delete_rasterizer_state = virgl_delete_rasterizer_state;
vctx->base.set_viewport_states = virgl_set_viewport_states;
vctx->base.create_vertex_elements_state = virgl_create_vertex_elements_state;
vctx->base.bind_vertex_elements_state = virgl_bind_vertex_elements_state;
vctx->base.delete_vertex_elements_state = virgl_delete_vertex_elements_state;
vctx->base.set_vertex_buffers = virgl_set_vertex_buffers;
- vctx->base.set_index_buffer = virgl_set_index_buffer;
vctx->base.set_constant_buffer = virgl_set_constant_buffer;
vctx->base.create_vs_state = virgl_create_vs_state;
vctx->base.create_gs_state = virgl_create_gs_state;
vctx->base.create_fs_state = virgl_create_fs_state;
vctx->base.bind_vs_state = virgl_bind_vs_state;
vctx->base.bind_gs_state = virgl_bind_gs_state;
vctx->base.bind_fs_state = virgl_bind_fs_state;
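virgl keeps a small driver-local binding (struct virgl_indexbuf, added further down in virgl_encode.h) and rebuilds it per draw from pipe_draw_info. Condensed into a sketch (illustrative only; the helper name is invented, and the reference counting and user-buffer upload from the hunk above are omitted):

    #include <string.h>

    /* Illustrative helper: translate pipe_draw_info into the driver-local
     * index-buffer binding that the virgl encoder still expects. */
    static void
    virgl_indexbuf_from_info(struct virgl_indexbuf *ib,
                             const struct pipe_draw_info *info)
    {
       memset(ib, 0, sizeof(*ib));

       if (!info->index_size)
          return;                               /* non-indexed draw */

       ib->index_size = info->index_size;
       ib->offset = info->start * ib->index_size;
       if (info->has_user_indices)
          ib->user_buffer = info->index.user;   /* uploaded before the draw */
       else
          ib->buffer = info->index.resource;    /* caller must add a reference */
    }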
diff --git a/src/gallium/drivers/virgl/virgl_context.h b/src/gallium/drivers/virgl/virgl_context.h
index 597ed49..d8d4ccb 100644
--- a/src/gallium/drivers/virgl/virgl_context.h
+++ b/src/gallium/drivers/virgl/virgl_context.h
@@ -51,21 +51,20 @@ struct virgl_textures_info {
struct virgl_context {
struct pipe_context base;
struct virgl_cmd_buf *cbuf;
struct virgl_textures_info samplers[PIPE_SHADER_TYPES];
struct pipe_framebuffer_state framebuffer;
struct slab_child_pool texture_transfer_pool;
- struct pipe_index_buffer index_buffer;
struct u_upload_mgr *uploader;
struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
unsigned num_vertex_buffers;
boolean vertex_array_dirty;
struct virgl_so_target so_targets[PIPE_MAX_SO_BUFFERS];
unsigned num_so_targets;
struct pipe_resource *ubos[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
diff --git a/src/gallium/drivers/virgl/virgl_encode.c b/src/gallium/drivers/virgl/virgl_encode.c
index 8a6a5fb..3c8a0f4 100644
--- a/src/gallium/drivers/virgl/virgl_encode.c
+++ b/src/gallium/drivers/virgl/virgl_encode.c
@@ -391,44 +391,44 @@ int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
for (i = 0; i < num_buffers; i++) {
struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
virgl_encoder_write_res(ctx, res);
}
return 0;
}
int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
- const struct pipe_index_buffer *ib)
+ const struct virgl_indexbuf *ib)
{
int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
struct virgl_resource *res = NULL;
if (ib)
res = virgl_resource(ib->buffer);
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
virgl_encoder_write_res(ctx, res);
if (ib) {
virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
- virgl_encoder_write_dword(ctx->cbuf, ib->offset);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
}
return 0;
}
int virgl_encoder_draw_vbo(struct virgl_context *ctx,
const struct pipe_draw_info *info)
{
virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, VIRGL_DRAW_VBO_SIZE));
virgl_encoder_write_dword(ctx->cbuf, info->start);
virgl_encoder_write_dword(ctx->cbuf, info->count);
virgl_encoder_write_dword(ctx->cbuf, info->mode);
- virgl_encoder_write_dword(ctx->cbuf, info->indexed);
+ virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
virgl_encoder_write_dword(ctx->cbuf, info->min_index);
virgl_encoder_write_dword(ctx->cbuf, info->max_index);
if (info->count_from_stream_output)
virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
else
diff --git a/src/gallium/drivers/virgl/virgl_encode.h b/src/gallium/drivers/virgl/virgl_encode.h
index 78d4194..02c032d 100644
--- a/src/gallium/drivers/virgl/virgl_encode.h
+++ b/src/gallium/drivers/virgl/virgl_encode.h
@@ -32,20 +32,27 @@ struct tgsi_token;
struct virgl_context;
struct virgl_resource;
struct virgl_sampler_view;
struct virgl_surface {
struct pipe_surface base;
uint32_t handle;
};
+struct virgl_indexbuf {
+ unsigned offset;
+ unsigned index_size; /**< size of an index, in bytes */
+ struct pipe_resource *buffer; /**< the actual buffer */
+ const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
+};
+
static inline struct virgl_surface *virgl_surface(struct pipe_surface *surf)
{
return (struct virgl_surface *)surf;
}
static inline void virgl_encoder_write_dword(struct virgl_cmd_buf *state,
uint32_t dword)
{
state->buf[state->cdw++] = dword;
}
@@ -160,21 +167,21 @@ int virgl_encode_set_sampler_views(struct virgl_context *ctx,
uint32_t num_views,
struct virgl_sampler_view **views);
int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
uint32_t shader_type,
uint32_t start_slot,
uint32_t num_handles,
uint32_t *handles);
int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
- const struct pipe_index_buffer *ib);
+ const struct virgl_indexbuf *ib);
uint32_t virgl_object_assign_handle(void);
int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
uint32_t shader,
uint32_t index,
uint32_t size,
const void *data);
int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
diff --git a/src/gallium/include/pipe/p_context.h b/src/gallium/include/pipe/p_context.h
index 4d5535b..4b75386 100644
--- a/src/gallium/include/pipe/p_context.h
+++ b/src/gallium/include/pipe/p_context.h
@@ -46,21 +46,20 @@ struct pipe_box;
struct pipe_clip_state;
struct pipe_constant_buffer;
struct pipe_debug_callback;
struct pipe_depth_stencil_alpha_state;
struct pipe_device_reset_callback;
struct pipe_draw_info;
struct pipe_grid_info;
struct pipe_fence_handle;
struct pipe_framebuffer_state;
struct pipe_image_view;
-struct pipe_index_buffer;
struct pipe_query;
struct pipe_poly_stipple;
struct pipe_rasterizer_state;
struct pipe_resolve_info;
struct pipe_resource;
struct pipe_sampler_state;
struct pipe_sampler_view;
struct pipe_scissor_state;
struct pipe_shader_buffer;
struct pipe_shader_state;
@@ -347,23 +346,20 @@ struct pipe_context {
void (*set_shader_images)(struct pipe_context *,
enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
const struct pipe_image_view *images);
void (*set_vertex_buffers)( struct pipe_context *,
unsigned start_slot,
unsigned num_buffers,
const struct pipe_vertex_buffer * );
- void (*set_index_buffer)( struct pipe_context *pipe,
- const struct pipe_index_buffer * );
-
/*@}*/
/**
* Stream output functions.
*/
/*@{*/
struct pipe_stream_output_target *(*create_stream_output_target)(
struct pipe_context *,
struct pipe_resource *,
diff --git a/src/gallium/include/pipe/p_state.h b/src/gallium/include/pipe/p_state.h
index 3cfdd34..15be8cb 100644
--- a/src/gallium/include/pipe/p_state.h
+++ b/src/gallium/include/pipe/p_state.h
@@ -621,43 +621,30 @@ struct pipe_vertex_element
enum pipe_format src_format:11;
/** Instance data rate divisor. 0 means this is per-vertex data,
* n means per-instance data used for n consecutive instances (n > 0).
*/
unsigned instance_divisor;
};
-/**
- * An index buffer. When an index buffer is bound, all indices to vertices
- * will be looked up in the buffer.
- */
-struct pipe_index_buffer
-{
- unsigned index_size; /**< size of an index, in bytes */
- unsigned offset; /**< offset to start of data in buffer, in bytes */
- struct pipe_resource *buffer; /**< the actual buffer */
- const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
-};
-
-
struct pipe_draw_indirect_info
{
unsigned offset; /**< must be 4 byte aligned */
unsigned stride; /**< must be 4 byte aligned */
unsigned draw_count; /**< number of indirect draws */
unsigned indirect_draw_count_offset; /**< must be 4 byte aligned */
/* Indirect draw parameters resource is laid out as follows:
*
- * if indexed is TRUE:
+ * if using indexed drawing:
* struct {
* uint32_t count;
* uint32_t instance_count;
* uint32_t start;
* int32_t index_bias;
* uint32_t start_instance;
* };
* otherwise:
* struct {
* uint32_t count;
@@ -673,26 +660,32 @@ struct pipe_draw_indirect_info
*/
struct pipe_resource *indirect_draw_count;
};
/**
* Information to describe a draw_vbo call.
*/
struct pipe_draw_info
{
- boolean indexed; /**< use index buffer */
+ ubyte index_size; /**< if 0, the draw is not indexed. */
enum pipe_prim_type mode:8; /**< the mode of the primitive */
- boolean primitive_restart;
+ unsigned primitive_restart:1;
+ unsigned has_user_indices:1; /**< if true, use index.user */
ubyte vertices_per_patch; /**< the number of vertices per patch */
- unsigned start; /**< the index of the first vertex */
+ /**
+ * Direct draws: start is the index of the first vertex
+ * Non-indexed indirect draws: not used
+ * Indexed indirect draws: start is added to the indirect start.
+ */
+ unsigned start;
unsigned count; /**< number of vertices */
unsigned start_instance; /**< first instance id */
unsigned instance_count; /**< number of instances */
unsigned drawid; /**< id of this draw in a multidraw */
/**
* For indexed drawing, these fields apply after index lookup.
*/
@@ -700,20 +693,31 @@ struct pipe_draw_info
unsigned min_index; /**< the min index */
unsigned max_index; /**< the max index */
/**
* Primitive restart enable/index (only applies to indexed drawing)
*/
unsigned restart_index;
/* Pointers must be at the end for an optimal structure layout on 64-bit. */
+ /**
+ * An index buffer. When an index buffer is bound, all indices to vertices
+ * will be looked up from the buffer.
+ *
+ * If has_user_indices, use index.user, else use index.resource.
+ */
+ union {
+ struct pipe_resource *resource; /**< real buffer */
+ const void *user; /**< pointer to a user buffer */
+ } index;
+
struct pipe_draw_indirect_info *indirect; /**< Indirect draw. */
/**
* Stream output target. If not NULL, it's used to provide the 'count'
* parameter based on the number of vertices captured by the stream output
* stage. (or generally, based on the number of bytes captured)
*
* Only 'mode', 'start_instance', and 'instance_count' are taken into
* account, all the other variables from pipe_draw_info are ignored.
*
diff --git a/src/gallium/state_trackers/nine/device9.c b/src/gallium/state_trackers/nine/device9.c
index 6390735..5dc24d6 100644
--- a/src/gallium/state_trackers/nine/device9.c
+++ b/src/gallium/state_trackers/nine/device9.c
@@ -2849,83 +2849,83 @@ NineDevice9_DrawIndexedPrimitiveUP( struct NineDevice9 *This,
D3DPRIMITIVETYPE PrimitiveType,
UINT MinVertexIndex,
UINT NumVertices,
UINT PrimitiveCount,
const void *pIndexData,
D3DFORMAT IndexDataFormat,
const void *pVertexStreamZeroData,
UINT VertexStreamZeroStride )
{
struct pipe_vertex_buffer vbuf;
- struct pipe_index_buffer ibuf;
DBG("iface %p, PrimitiveType %u, MinVertexIndex %u, NumVertices %u "
"PrimitiveCount %u, pIndexData %p, IndexDataFormat %u "
"pVertexStreamZeroData %p, VertexStreamZeroStride %u\n",
This, PrimitiveType, MinVertexIndex, NumVertices, PrimitiveCount,
pIndexData, IndexDataFormat,
pVertexStreamZeroData, VertexStreamZeroStride);
user_assert(pIndexData && pVertexStreamZeroData, D3DERR_INVALIDCALL);
user_assert(VertexStreamZeroStride, D3DERR_INVALIDCALL);
user_assert(IndexDataFormat == D3DFMT_INDEX16 ||
IndexDataFormat == D3DFMT_INDEX32, D3DERR_INVALIDCALL);
user_assert(PrimitiveCount, D3D_OK);
vbuf.stride = VertexStreamZeroStride;
vbuf.buffer_offset = 0;
vbuf.is_user_buffer = true;
vbuf.buffer.user = pVertexStreamZeroData;
- ibuf.index_size = (IndexDataFormat == D3DFMT_INDEX16) ? 2 : 4;
- ibuf.offset = 0;
- ibuf.buffer = NULL;
- ibuf.user_buffer = pIndexData;
+ unsigned index_size = (IndexDataFormat == D3DFMT_INDEX16) ? 2 : 4;
+ struct pipe_resource *ibuf = NULL;
if (!This->driver_caps.user_vbufs) {
const unsigned base = MinVertexIndex * VertexStreamZeroStride;
vbuf.is_user_buffer = false;
vbuf.buffer.resource = NULL;
u_upload_data(This->vertex_uploader,
base,
NumVertices * VertexStreamZeroStride, /* XXX */
4,
(const uint8_t *)pVertexStreamZeroData + base,
&vbuf.buffer_offset,
&vbuf.buffer.resource);
u_upload_unmap(This->vertex_uploader);
/* Won't be used: */
vbuf.buffer_offset -= base;
}
+
+ unsigned index_offset = 0;
if (This->csmt_active) {
u_upload_data(This->pipe_secondary->stream_uploader,
0,
- (prim_count_to_vertex_count(PrimitiveType, PrimitiveCount)) * ibuf.index_size,
+ (prim_count_to_vertex_count(PrimitiveType, PrimitiveCount)) * index_size,
4,
- ibuf.user_buffer,
- &ibuf.offset,
- &ibuf.buffer);
+ pIndexData,
+ &index_offset,
+ &ibuf);
u_upload_unmap(This->pipe_secondary->stream_uploader);
- ibuf.user_buffer = NULL;
}
NineBeforeDraw(This);
nine_context_draw_indexed_primitive_from_vtxbuf_idxbuf(This, PrimitiveType,
MinVertexIndex,
NumVertices,
PrimitiveCount,
&vbuf,
- &ibuf);
+ ibuf,
+ index_offset,
+ index_size);
NineAfterDraw(This);
pipe_vertex_buffer_unreference(&vbuf);
- pipe_resource_reference(&ibuf.buffer, NULL);
+ pipe_resource_reference(&ibuf, NULL);
NineDevice9_PauseRecording(This);
NineDevice9_SetIndices(This, NULL);
NineDevice9_SetStreamSource(This, 0, NULL, 0, 0);
NineDevice9_ResumeRecording(This);
return D3D_OK;
}
HRESULT NINE_WINAPI
@@ -3025,21 +3025,21 @@ NineDevice9_ProcessVertices( struct NineDevice9 *This,
}
draw.mode = PIPE_PRIM_POINTS;
draw.count = VertexCount;
draw.start_instance = 0;
draw.primitive_restart = FALSE;
draw.restart_index = 0;
draw.count_from_stream_output = NULL;
draw.indirect = NULL;
draw.instance_count = 1;
- draw.indexed = FALSE;
+ draw.index_size = 0;
draw.start = 0;
draw.index_bias = 0;
draw.min_index = 0;
draw.max_index = VertexCount - 1;
pipe_sw->set_stream_output_targets(pipe_sw, 1, &target, offsets);
pipe_sw->draw_vbo(pipe_sw, &draw);
diff --git a/src/gallium/state_trackers/nine/indexbuffer9.c b/src/gallium/state_trackers/nine/indexbuffer9.c
index cbd75fb..d5f5492 100644
--- a/src/gallium/state_trackers/nine/indexbuffer9.c
+++ b/src/gallium/state_trackers/nine/indexbuffer9.c
@@ -42,50 +42,49 @@ NineIndexBuffer9_ctor( struct NineIndexBuffer9 *This,
{
HRESULT hr;
DBG("This=%p pParams=%p pDesc=%p Usage=%s\n",
This, pParams, pDesc, nine_D3DUSAGE_to_str(pDesc->Usage));
hr = NineBuffer9_ctor(&This->base, pParams, D3DRTYPE_INDEXBUFFER,
pDesc->Usage, pDesc->Size, pDesc->Pool);
if (FAILED(hr))
return hr;
- This->buffer.buffer = NULL;
- This->buffer.offset = 0;
+ This->buffer = NULL;
+ This->offset = 0;
switch (pDesc->Format) {
- case D3DFMT_INDEX16: This->buffer.index_size = 2; break;
- case D3DFMT_INDEX32: This->buffer.index_size = 4; break;
+ case D3DFMT_INDEX16: This->index_size = 2; break;
+ case D3DFMT_INDEX32: This->index_size = 4; break;
default:
user_assert(!"Invalid index format.", D3DERR_INVALIDCALL);
break;
}
- This->buffer.user_buffer = NULL;
pDesc->Type = D3DRTYPE_INDEXBUFFER;
This->desc = *pDesc;
return D3D_OK;
}
void
NineIndexBuffer9_dtor( struct NineIndexBuffer9 *This )
{
NineBuffer9_dtor(&This->base);
}
-const struct pipe_index_buffer *
+struct pipe_resource *
NineIndexBuffer9_GetBuffer( struct NineIndexBuffer9 *This )
{
/* The resource may change */
- This->buffer.buffer = NineBuffer9_GetResource(&This->base, &This->buffer.offset);
- return &This->buffer;
+ This->buffer = NineBuffer9_GetResource(&This->base, &This->offset);
+ return This->buffer;
}
HRESULT NINE_WINAPI
NineIndexBuffer9_Lock( struct NineIndexBuffer9 *This,
UINT OffsetToLock,
UINT SizeToLock,
void **ppbData,
DWORD Flags )
{
return NineBuffer9_Lock(&This->base, OffsetToLock, SizeToLock, ppbData, Flags);
diff --git a/src/gallium/state_trackers/nine/indexbuffer9.h b/src/gallium/state_trackers/nine/indexbuffer9.h
index e695082..0efad7f 100644
--- a/src/gallium/state_trackers/nine/indexbuffer9.h
+++ b/src/gallium/state_trackers/nine/indexbuffer9.h
@@ -22,30 +22,31 @@
#ifndef _NINE_INDEXBUFFER9_H_
#define _NINE_INDEXBUFFER9_H_
#include "resource9.h"
#include "buffer9.h"
#include "pipe/p_state.h"
struct pipe_screen;
struct pipe_context;
-struct pipe_index_buffer;
struct pipe_transfer;
struct NineDevice9;
struct NineIndexBuffer9
{
struct NineBuffer9 base;
/* g3d stuff */
- struct pipe_index_buffer buffer;
+ struct pipe_resource *buffer;
+ unsigned offset;
+ unsigned index_size;
D3DINDEXBUFFER_DESC desc;
};
static inline struct NineIndexBuffer9 *
NineIndexBuffer9( void *data )
{
return (struct NineIndexBuffer9 *)data;
}
HRESULT
@@ -56,21 +57,21 @@ NineIndexBuffer9_new( struct NineDevice9 *pDevice,
HRESULT
NineIndexBuffer9_ctor( struct NineIndexBuffer9 *This,
struct NineUnknownParams *pParams,
D3DINDEXBUFFER_DESC *pDesc );
void
NineIndexBuffer9_dtor( struct NineIndexBuffer9 *This );
/*** Nine private ***/
-const struct pipe_index_buffer *
+struct pipe_resource *
NineIndexBuffer9_GetBuffer( struct NineIndexBuffer9 *This );
/*** Direct3D public ***/
HRESULT NINE_WINAPI
NineIndexBuffer9_Lock( struct NineIndexBuffer9 *This,
UINT OffsetToLock,
UINT SizeToLock,
void **ppbData,
DWORD Flags );
diff --git a/src/gallium/state_trackers/nine/nine_state.c b/src/gallium/state_trackers/nine/nine_state.c
index 3b1cd7c..8bfa7fc 100644
--- a/src/gallium/state_trackers/nine/nine_state.c
+++ b/src/gallium/state_trackers/nine/nine_state.c
@@ -1093,31 +1093,20 @@ commit_scissor(struct NineDevice9 *device)
static inline void
commit_rasterizer(struct NineDevice9 *device)
{
struct nine_context *context = &device->context;
cso_set_rasterizer(context->cso, &context->pipe_data.rast);
}
static inline void
-commit_index_buffer(struct NineDevice9 *device)
-{
- struct nine_context *context = &device->context;
- struct pipe_context *pipe = context->pipe;
- if (context->idxbuf.buffer)
- pipe->set_index_buffer(pipe, &context->idxbuf);
- else
- pipe->set_index_buffer(pipe, NULL);
-}
-
-static inline void
commit_vs_constants(struct NineDevice9 *device)
{
struct nine_context *context = &device->context;
struct pipe_context *pipe = context->pipe;
if (unlikely(!context->programmable_vs))
pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &context->pipe_data.cb_vs_ff);
else {
if (context->swvp) {
pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &context->pipe_data.cb0_swvp);
@@ -1228,22 +1217,20 @@ nine_update_state(struct NineDevice9 *device)
if (group & NINE_STATE_FB)
update_framebuffer(device, FALSE);
if (group & NINE_STATE_BLEND)
prepare_blend(device);
if (group & NINE_STATE_DSA)
prepare_dsa(device);
if (group & NINE_STATE_VIEWPORT)
update_viewport(device);
if (group & (NINE_STATE_VDECL | NINE_STATE_VS | NINE_STATE_STREAMFREQ))
update_vertex_elements(device);
- if (group & NINE_STATE_IDXBUF)
- commit_index_buffer(device);
}
if (likely(group & (NINE_STATE_FREQUENT | NINE_STATE_VS | NINE_STATE_PS | NINE_STATE_SWVP))) {
if (group & NINE_STATE_MULTISAMPLE)
group |= check_multisample(device);
if (group & NINE_STATE_RASTERIZER)
prepare_rasterizer(device);
if (group & (NINE_STATE_TEXTURE | NINE_STATE_SAMPLER))
update_textures_and_samplers(device);
if ((group & (NINE_STATE_VS_CONST | NINE_STATE_VS | NINE_STATE_SWVP)) && context->programmable_vs)
@@ -1568,42 +1555,39 @@ CSMT_ITEM_NO_WAIT(nine_context_set_stream_source_freq,
context->changed.group |= NINE_STATE_STREAMFREQ;
}
CSMT_ITEM_NO_WAIT(nine_context_set_indices_apply,
ARG_BIND_RES(struct pipe_resource, res),
ARG_VAL(UINT, IndexSize),
ARG_VAL(UINT, OffsetInBytes))
{
struct nine_context *context = &device->context;
- context->idxbuf.index_size = IndexSize;
- context->idxbuf.offset = OffsetInBytes;
- pipe_resource_reference(&context->idxbuf.buffer, res);
- context->idxbuf.user_buffer = NULL;
+ context->index_size = IndexSize;
+ context->index_offset = OffsetInBytes;
+ pipe_resource_reference(&context->idxbuf, res);
context->changed.group |= NINE_STATE_IDXBUF;
}
void
nine_context_set_indices(struct NineDevice9 *device,
struct NineIndexBuffer9 *idxbuf)
{
- const struct pipe_index_buffer *pipe_idxbuf;
struct pipe_resource *res = NULL;
UINT IndexSize = 0;
UINT OffsetInBytes = 0;
if (idxbuf) {
- pipe_idxbuf = NineIndexBuffer9_GetBuffer(idxbuf);
- IndexSize = pipe_idxbuf->index_size;
- res = pipe_idxbuf->buffer;
- OffsetInBytes = pipe_idxbuf->offset;
+ res = NineIndexBuffer9_GetBuffer(idxbuf);
+ IndexSize = idxbuf->index_size;
+ OffsetInBytes = idxbuf->offset;
}
nine_context_set_indices_apply(device, res, IndexSize, OffsetInBytes);
}
CSMT_ITEM_NO_WAIT(nine_context_set_vertex_declaration,
ARG_BIND_REF(struct NineVertexDeclaration9, vdecl))
{
struct nine_context *context = &device->context;
BOOL was_programmable_vs = context->programmable_vs;
@@ -2565,21 +2549,21 @@ CSMT_ITEM_NO_WAIT(nine_context_draw_primitive,
ARG_VAL(D3DPRIMITIVETYPE, PrimitiveType),
ARG_VAL(UINT, StartVertex),
ARG_VAL(UINT, PrimitiveCount))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = FALSE;
+ info.index_size = 0;
info.start = StartVertex;
info.index_bias = 0;
info.min_index = info.start;
info.max_index = info.count - 1;
context->pipe->draw_vbo(context->pipe, &info);
}
CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive,
ARG_VAL(D3DPRIMITIVETYPE, PrimitiveType),
@@ -2588,73 +2572,77 @@ CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive,
ARG_VAL(UINT, NumVertices),
ARG_VAL(UINT, StartIndex),
ARG_VAL(UINT, PrimitiveCount))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = TRUE;
+ info.index_size = context->index_size;
info.start = StartIndex;
info.index_bias = BaseVertexIndex;
/* These don't include index bias: */
info.min_index = MinVertexIndex;
info.max_index = MinVertexIndex + NumVertices - 1;
+ info.index.resource = context->idxbuf;
context->pipe->draw_vbo(context->pipe, &info);
}
CSMT_ITEM_NO_WAIT(nine_context_draw_primitive_from_vtxbuf,
ARG_VAL(D3DPRIMITIVETYPE, PrimitiveType),
ARG_VAL(UINT, PrimitiveCount),
ARG_BIND_VBUF(struct pipe_vertex_buffer, vtxbuf))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = FALSE;
+ info.index_size = 0;
info.start = 0;
info.index_bias = 0;
info.min_index = 0;
info.max_index = info.count - 1;
context->pipe->set_vertex_buffers(context->pipe, 0, 1, vtxbuf);
context->pipe->draw_vbo(context->pipe, &info);
}
CSMT_ITEM_NO_WAIT(nine_context_draw_indexed_primitive_from_vtxbuf_idxbuf,
ARG_VAL(D3DPRIMITIVETYPE, PrimitiveType),
ARG_VAL(UINT, MinVertexIndex),
ARG_VAL(UINT, NumVertices),
ARG_VAL(UINT, PrimitiveCount),
ARG_BIND_VBUF(struct pipe_vertex_buffer, vbuf),
- ARG_BIND_IBUF(struct pipe_index_buffer, ibuf))
+ ARG_BIND_RES(struct pipe_resource, ibuf),
+ ARG_VAL(UINT, index_offset),
+ ARG_VAL(UINT, index_size))
{
struct nine_context *context = &device->context;
struct pipe_draw_info info;
nine_update_state(device);
init_draw_info(&info, device, PrimitiveType, PrimitiveCount);
- info.indexed = TRUE;
- info.start = 0;
+ info.index_size = index_size;
+ info.start = index_offset / info.index_size;
info.index_bias = 0;
info.min_index = MinVertexIndex;
info.max_index = MinVertexIndex + NumVertices - 1;
+ info.has_user_indices = false;
+ info.index.resource = ibuf;
context->pipe->set_vertex_buffers(context->pipe, 0, 1, vbuf);
- context->pipe->set_index_buffer(context->pipe, ibuf);
context->pipe->draw_vbo(context->pipe, &info);
}
CSMT_ITEM_NO_WAIT(nine_context_resource_copy_region,
ARG_BIND_REF(struct NineUnknown, dst),
ARG_BIND_REF(struct NineUnknown, src),
ARG_BIND_RES(struct pipe_resource, dst_res),
ARG_VAL(unsigned, dst_level),
ARG_COPY_REF(struct pipe_box, dst_box),
@@ -3128,31 +3116,30 @@ nine_context_clear(struct NineDevice9 *device)
* do not change on Reset.
*/
cso_set_samplers(cso, PIPE_SHADER_VERTEX, 0, NULL);
cso_set_samplers(cso, PIPE_SHADER_FRAGMENT, 0, NULL);
cso_set_sampler_views(cso, PIPE_SHADER_VERTEX, 0, NULL);
cso_set_sampler_views(cso, PIPE_SHADER_FRAGMENT, 0, NULL);
pipe->set_vertex_buffers(pipe, 0, device->caps.MaxStreams, NULL);
- pipe->set_index_buffer(pipe, NULL);
for (i = 0; i < ARRAY_SIZE(context->rt); ++i)
nine_bind(&context->rt[i], NULL);
nine_bind(&context->ds, NULL);
nine_bind(&context->vs, NULL);
nine_bind(&context->ps, NULL);
nine_bind(&context->vdecl, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; ++i)
pipe_vertex_buffer_unreference(&context->vtxbuf[i]);
- pipe_resource_reference(&context->idxbuf.buffer, NULL);
+ pipe_resource_reference(&context->idxbuf, NULL);
for (i = 0; i < NINE_MAX_SAMPLERS; ++i) {
context->texture[i].enabled = FALSE;
pipe_resource_reference(&context->texture[i].resource,
NULL);
pipe_sampler_view_reference(&context->texture[i].view[0],
NULL);
pipe_sampler_view_reference(&context->texture[i].view[1],
NULL);
}
diff --git a/src/gallium/state_trackers/nine/nine_state.h b/src/gallium/state_trackers/nine/nine_state.h
index a487d8c..9eb04f6 100644
--- a/src/gallium/state_trackers/nine/nine_state.h
+++ b/src/gallium/state_trackers/nine/nine_state.h
@@ -264,21 +264,23 @@ struct nine_context {
BOOL ps_const_b[NINE_MAX_CONST_B];
float *ps_lconstf_temp;
struct NineVertexDeclaration9 *vdecl;
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
UINT stream_freq[PIPE_MAX_ATTRIBS];
uint32_t stream_instancedata_mask; /* derived from stream_freq */
uint32_t stream_usage_mask; /* derived from VS and vdecl */
- struct pipe_index_buffer idxbuf;
+ struct pipe_resource *idxbuf;
+ unsigned index_offset;
+ unsigned index_size;
struct pipe_clip_state clip;
DWORD rs[NINED3DRS_COUNT];
struct {
BOOL enabled;
BOOL shadow;
DWORD lod;
D3DRESOURCETYPE type;
@@ -508,21 +510,23 @@ nine_context_draw_primitive_from_vtxbuf(struct NineDevice9 *device,
UINT PrimitiveCount,
struct pipe_vertex_buffer *vtxbuf);
void
nine_context_draw_indexed_primitive_from_vtxbuf_idxbuf(struct NineDevice9 *device,
D3DPRIMITIVETYPE PrimitiveType,
UINT MinVertexIndex,
UINT NumVertices,
UINT PrimitiveCount,
struct pipe_vertex_buffer *vbuf,
- struct pipe_index_buffer *ibuf);
+ struct pipe_resource *ibuf,
+ unsigned index_offset,
+ unsigned index_size);
void
nine_context_resource_copy_region(struct NineDevice9 *device,
struct NineUnknown *dst,
struct NineUnknown *src,
struct pipe_resource* dst_res,
unsigned dst_level,
const struct pipe_box *dst_box,
struct pipe_resource* src_res,
unsigned src_level,
diff --git a/src/mesa/state_tracker/st_draw.c b/src/mesa/state_tracker/st_draw.c
index 29381b6..3fee0cd 100644
--- a/src/mesa/state_tracker/st_draw.c
+++ b/src/mesa/state_tracker/st_draw.c
@@ -79,58 +79,28 @@ all_varyings_in_vbos(const struct gl_vertex_array *arrays[])
if (arrays[i]->StrideB &&
!arrays[i]->InstanceDivisor &&
!_mesa_is_bufferobj(arrays[i]->BufferObj))
return GL_FALSE;
return GL_TRUE;
}
/**
- * Basically, translate Mesa's index buffer information into
- * a pipe_index_buffer object.
- */
-static void
-setup_index_buffer(struct st_context *st,
- const struct _mesa_index_buffer *ib)
-{
- struct pipe_index_buffer ibuffer;
- struct gl_buffer_object *bufobj = ib->obj;
-
- ibuffer.index_size = ib->index_size;
-
- /* get/create the index buffer object */
- if (_mesa_is_bufferobj(bufobj)) {
- /* indices are in a real VBO */
- ibuffer.buffer = st_buffer_object(bufobj)->buffer;
- ibuffer.offset = pointer_to_offset(ib->ptr);
- ibuffer.user_buffer = NULL;
- }
- else {
- /* indices are in user space memory */
- ibuffer.buffer = NULL;
- ibuffer.offset = 0;
- ibuffer.user_buffer = ib->ptr;
- }
-
- cso_set_index_buffer(st->cso_context, &ibuffer);
-}
-
-
-/**
* Set the restart index.
*/
static void
-setup_primitive_restart(struct gl_context *ctx, struct pipe_draw_info *info,
- unsigned index_size)
+setup_primitive_restart(struct gl_context *ctx, struct pipe_draw_info *info)
{
if (ctx->Array._PrimitiveRestart) {
+ unsigned index_size = info->index_size;
+
info->restart_index =
_mesa_primitive_restart_index(ctx, index_size);
/* Enable primitive restart only when the restart index can have an
* effect. This is required for correctness in radeonsi VI support.
* Other hardware may also benefit from taking a faster, non-restart path
* when possible.
*/
if (index_size == 4 || info->restart_index < (1 << (index_size * 8)))
info->primitive_restart = true;
@@ -169,20 +139,21 @@ st_draw_vbo(struct gl_context *ctx,
GLuint min_index,
GLuint max_index,
struct gl_transform_feedback_object *tfb_vertcount,
unsigned stream,
struct gl_buffer_object *indirect)
{
struct st_context *st = st_context(ctx);
struct pipe_draw_info info;
const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
unsigned i;
+ unsigned start = 0;
/* Mesa core state should have been validated already */
assert(ctx->NewState == 0x0);
if (unlikely(!st->bitmap.cache.empty))
st_flush_bitmap_cache(st);
st_invalidate_readpix_cache(st);
/* Validate state. */
@@ -191,66 +162,76 @@ st_draw_vbo(struct gl_context *ctx,
st_validate_state(st, ST_PIPELINE_RENDER);
}
if (st->vertex_array_out_of_memory) {
return;
}
util_draw_init_info(&info);
if (ib) {
+ struct gl_buffer_object *bufobj = ib->obj;
+
/* Get index bounds for user buffers. */
if (!index_bounds_valid)
if (!all_varyings_in_vbos(arrays))
vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index,
nr_prims);
- setup_index_buffer(st, ib);
-
- info.indexed = TRUE;
+ info.index_size = ib->index_size;
info.min_index = min_index;
info.max_index = max_index;
- setup_primitive_restart(ctx, &info, ib->index_size);
+ if (_mesa_is_bufferobj(bufobj)) {
+ /* indices are in a real VBO */
+ info.index.resource = st_buffer_object(bufobj)->buffer;
+ start = pointer_to_offset(ib->ptr) / info.index_size;
+ } else {
+ /* indices are in user space memory */
+ info.has_user_indices = true;
+ info.index.user = ib->ptr;
+ }
+
+ setup_primitive_restart(ctx, &info);
}
else {
/* Transform feedback drawing is always non-indexed. */
/* Set info.count_from_stream_output. */
if (tfb_vertcount) {
if (!st_transform_feedback_draw_init(tfb_vertcount, stream, &info))
return;
}
}
assert(!indirect);
/* do actual drawing */
for (i = 0; i < nr_prims; i++) {
info.mode = translate_prim(ctx, prims[i].mode);
- info.start = prims[i].start;
+ info.start = start + prims[i].start;
info.count = prims[i].count;
info.start_instance = prims[i].base_instance;
info.instance_count = prims[i].num_instances;
info.vertices_per_patch = ctx->TessCtrlProgram.patch_vertices;
info.index_bias = prims[i].basevertex;
info.drawid = prims[i].draw_id;
if (!ib) {
info.min_index = info.start;
info.max_index = info.start + info.count - 1;
}
if (ST_DEBUG & DEBUG_DRAW) {
- debug_printf("st/draw: mode %s start %u count %u indexed %d\n",
+ debug_printf("st/draw: mode %s start %u count %u index_size %d\n",
u_prim_name(info.mode),
info.start,
info.count,
- info.indexed);
+ info.index_size);
}
/* Don't call u_trim_pipe_prim. Drivers should do it if they need it. */
cso_draw_vbo(st->cso_context, &info);
}
}
static void
st_indirect_draw_vbo(struct gl_context *ctx,
GLuint mode,
@@ -277,41 +258,47 @@ st_indirect_draw_vbo(struct gl_context *ctx,
st->gfx_shaders_may_be_dirty) {
st_validate_state(st, ST_PIPELINE_RENDER);
}
if (st->vertex_array_out_of_memory) {
return;
}
memset(&indirect, 0, sizeof(indirect));
util_draw_init_info(&info);
+ info.start = 0; /* index offset / index size */
if (ib) {
- setup_index_buffer(st, ib);
+ struct gl_buffer_object *bufobj = ib->obj;
+
+ /* indices are always in a real VBO */
+ assert(_mesa_is_bufferobj(bufobj));
- info.indexed = TRUE;
+ info.index_size = ib->index_size;
+ info.index.resource = st_buffer_object(bufobj)->buffer;
+ info.start = pointer_to_offset(ib->ptr) / info.index_size;
/* Primitive restart is not handled by the VBO module in this case. */
- setup_primitive_restart(ctx, &info, ib->index_size);
+ setup_primitive_restart(ctx, &info);
}
info.mode = translate_prim(ctx, mode);
info.vertices_per_patch = ctx->TessCtrlProgram.patch_vertices;
info.indirect = &indirect;
indirect.buffer = st_buffer_object(indirect_data)->buffer;
indirect.offset = indirect_offset;
if (ST_DEBUG & DEBUG_DRAW) {
- debug_printf("st/draw indirect: mode %s drawcount %d indexed %d\n",
+ debug_printf("st/draw indirect: mode %s drawcount %d index_size %d\n",
u_prim_name(info.mode),
draw_count,
- info.indexed);
+ info.index_size);
}
if (!st->has_multi_draw_indirect) {
int i;
assert(!indirect_params);
indirect.draw_count = 1;
for (i = 0; i < draw_count; i++) {
info.drawid = i;
cso_draw_vbo(st->cso_context, &info);
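For a concrete view of the translation st_draw_vbo now performs, consider a call such as glDrawElementsBaseVertex(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void *)48, 10) with a VBO-backed element array buffer. Illustratively (made-up numbers; "ibo" stands in for the pipe_resource behind the GL buffer object), the fields end up as:

    #include "pipe/p_state.h"
    #include "util/u_draw.h"

    /* Illustrative only; "ibo" is assumed to be the pipe_resource backing
     * the bound GL_ELEMENT_ARRAY_BUFFER. */
    static void
    fill_example_draw(struct pipe_draw_info *info, struct pipe_resource *ibo)
    {
       util_draw_init_info(info);
       info->index_size = 4;            /* GL_UNSIGNED_INT */
       info->index.resource = ibo;
       info->start = 48 / 4;            /* byte offset 48 -> 12 indices in */
       info->count = 6;
       info->index_bias = 10;           /* BaseVertex, applied after lookup */
    }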
diff --git a/src/mesa/state_tracker/st_draw_feedback.c b/src/mesa/state_tracker/st_draw_feedback.c
index ad92ff5..987a156 100644
--- a/src/mesa/state_tracker/st_draw_feedback.c
+++ b/src/mesa/state_tracker/st_draw_feedback.c
@@ -121,21 +121,20 @@ st_feedback_draw_vbo(struct gl_context *ctx,
unsigned stream,
struct gl_buffer_object *indirect)
{
struct st_context *st = st_context(ctx);
struct pipe_context *pipe = st->pipe;
struct draw_context *draw = st_get_draw_context(st);
const struct st_vertex_program *vp;
const struct pipe_shader_state *vs;
struct pipe_vertex_buffer vbuffers[PIPE_MAX_SHADER_INPUTS];
struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
- struct pipe_index_buffer ibuffer;
struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {NULL};
struct pipe_transfer *ib_transfer = NULL;
const struct gl_vertex_array **arrays = ctx->Array._DrawArrays;
GLuint attr, i;
const GLubyte *low_addr = NULL;
const void *mapped_indices = NULL;
if (!draw)
return;
@@ -231,67 +230,64 @@ st_feedback_draw_vbo(struct gl_context *ctx,
/* tell draw about this attribute */
#if 0
draw_set_vertex_buffer(draw, attr, &vbuffer[attr]);
#endif
}
draw_set_vertex_buffers(draw, 0, vp->num_inputs, vbuffers);
draw_set_vertex_elements(draw, vp->num_inputs, velements);
- memset(&ibuffer, 0, sizeof(ibuffer));
+ unsigned start = 0;
+
if (ib) {
struct gl_buffer_object *bufobj = ib->obj;
+ unsigned index_size = ib->index_size;
- ibuffer.index_size = ib->index_size;
- if (ibuffer.index_size == 0)
+ if (index_size == 0)
goto out_unref_vertex;
if (bufobj && bufobj->Name) {
struct st_buffer_object *stobj = st_buffer_object(bufobj);
- pipe_resource_reference(&ibuffer.buffer, stobj->buffer);
- ibuffer.offset = pointer_to_offset(ib->ptr);
-
+ start = pointer_to_offset(ib->ptr) / index_size;
mapped_indices = pipe_buffer_map(pipe, stobj->buffer,
PIPE_TRANSFER_READ, &ib_transfer);
}
else {
- /* skip setting ibuffer.buffer as the draw module does not use it */
mapped_indices = ib->ptr;
}
draw_set_indexes(draw,
- (ubyte *) mapped_indices + ibuffer.offset,
- ibuffer.index_size, ~0);
+ (ubyte *) mapped_indices,
+ index_size, ~0);
}
/* set the constant buffer */
draw_set_mapped_constant_buffer(st->draw, PIPE_SHADER_VERTEX, 0,
st->state.constants[PIPE_SHADER_VERTEX].ptr,
st->state.constants[PIPE_SHADER_VERTEX].size);
/* draw here */
for (i = 0; i < nr_prims; i++) {
- draw_arrays(draw, prims[i].mode, prims[i].start, prims[i].count);
+ draw_arrays(draw, prims[i].mode, start + prims[i].start, prims[i].count);
}
/*
* unmap vertex/index buffers
*/
if (ib) {
draw_set_indexes(draw, NULL, 0, 0);
if (ib_transfer)
pipe_buffer_unmap(pipe, ib_transfer);
- pipe_resource_reference(&ibuffer.buffer, NULL);
}
out_unref_vertex:
for (attr = 0; attr < vp->num_inputs; attr++) {
if (vb_transfer[attr])
pipe_buffer_unmap(pipe, vb_transfer[attr]);
draw_set_mapped_vertex_buffer(draw, attr, NULL, 0);
pipe_vertex_buffer_unreference(&vbuffers[attr]);
}
draw_set_vertex_buffers(draw, 0, vp->num_inputs, NULL);
--
2.7.4