[Mesa-dev] [PATCH 5/6] pipe_video: Get rid of unnecessary state tracking
Maarten Lankhorst
m.b.lankhorst at gmail.com
Wed Nov 16 02:56:53 PST 2011
This requires some special care: vl_mpeg12_decoder was dumping some state
in the decode_buffer, and we can no longer control the order in which
the decode_buffer and the decoder are freed. As such, vl_idct cannot be used
in cleanup. Fortunately it's not needed. Also, flush will have to be called
before destroying a surface in XvMC, otherwise there is the possibility
that the decoder still holds a reference to it.
Signed-off-by: Maarten Lankhorst <m.b.lankhorst at gmail.com>
---
begin_frame was moved, and end_frame was renamed to flush for vl_mpeg12_decoder (with some checks added).
src/gallium/auxiliary/vl/vl_idct.c | 14 +-
src/gallium/auxiliary/vl/vl_mpeg12_decoder.c | 367 ++++++++++----------
src/gallium/auxiliary/vl/vl_video_buffer.c | 4 +
src/gallium/auxiliary/vl/vl_video_buffer.h | 2 +
src/gallium/drivers/nouveau/nouveau_video.c | 12 -
src/gallium/include/pipe/p_video_decoder.h | 25 --
src/gallium/state_trackers/vdpau/decode.c | 40 +--
src/gallium/state_trackers/xorg/xvmc/surface.c | 65 +----
.../state_trackers/xorg/xvmc/xvmc_private.h | 1 -
9 files changed, 193 insertions(+), 337 deletions(-)
diff --git a/src/gallium/auxiliary/vl/vl_idct.c b/src/gallium/auxiliary/vl/vl_idct.c
index a2b3537..8394542 100644
--- a/src/gallium/auxiliary/vl/vl_idct.c
+++ b/src/gallium/auxiliary/vl/vl_idct.c
@@ -614,9 +614,9 @@ init_source(struct vl_idct *idct, struct vl_idct_buffer *buffer)
}
static void
-cleanup_source(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+cleanup_source(struct vl_idct_buffer *buffer)
{
- assert(idct && buffer);
+ assert(buffer);
pipe_surface_reference(&buffer->fb_state_mismatch.cbufs[0], NULL);
@@ -665,13 +665,13 @@ error_surfaces:
}
static void
-cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+cleanup_intermediate(struct vl_idct_buffer *buffer)
{
unsigned i;
- assert(idct && buffer);
+ assert(buffer);
- for(i = 0; i < idct->nr_of_render_targets; ++i)
+ for(i = 0; i < buffer->fb_state.nr_cbufs; ++i)
pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);
pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, NULL);
@@ -823,8 +823,8 @@ vl_idct_cleanup_buffer(struct vl_idct_buffer *buffer)
{
assert(buffer);
- cleanup_source(buffer->idct, buffer);
- cleanup_intermediate(buffer->idct, buffer);
+ cleanup_source(buffer);
+ cleanup_intermediate(buffer);
pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, NULL);
pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, NULL);
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
index 18ba12d..4eaad16 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
@@ -389,7 +389,7 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
- assert(decoder);
+ assert(dec);
/* Asserted in softpipe_delete_fs_state() for some reason */
dec->base.context->bind_vs_state(dec->base.context, NULL);
@@ -424,13 +424,164 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
FREE(dec);
}
-static void *
-vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
+static void
+vl_mpeg12_begin_frame(struct vl_mpeg12_decoder *dec)
+{
+ struct vl_mpeg12_buffer *buf;
+
+ struct pipe_resource *tex;
+ struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };
+
+ unsigned i;
+
+ assert(dec);
+
+ buf = dec->current_buffer;
+
+ if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
+ dec->intra_matrix[0] = 1 << (7 - dec->picture_desc.intra_dc_precision);
+
+ for (i = 0; i < VL_MAX_PLANES; ++i) {
+ vl_zscan_upload_quant(&buf->zscan[i], dec->intra_matrix, true);
+ vl_zscan_upload_quant(&buf->zscan[i], dec->non_intra_matrix, false);
+ }
+
+ vl_vb_map(&buf->vertex_stream, dec->base.context);
+
+ tex = buf->zscan_source->texture;
+ rect.width = tex->width0;
+ rect.height = tex->height0;
+
+ buf->tex_transfer = dec->base.context->get_transfer
+ (
+ dec->base.context, tex,
+ 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
+ &rect
+ );
+
+ buf->block_num = 0;
+ buf->texels = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer);
+
+ for (i = 0; i < VL_MAX_PLANES; ++i) {
+ buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
+ buf->num_ycbcr_blocks[i] = 0;
+ }
+
+ for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
+ buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
+
+ if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+ vl_mpg12_bs_set_picture_desc(&buf->bs, &dec->picture_desc);
+
+ } else {
+
+ for (i = 0; i < VL_MAX_PLANES; ++i)
+ vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
+ }
+}
+
+static void
+vl_mpeg12_flush(struct pipe_video_decoder *decoder)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
- struct vl_mpeg12_buffer *buffer;
+ struct vl_video_buffer *mc_source;
+ struct pipe_sampler_view **mc_source_sv;
+ struct pipe_vertex_buffer vb[3];
+ struct vl_mpeg12_buffer *buf;
+
+ unsigned i, j, component;
+ unsigned nr_components;
assert(dec);
+ if (!dec->current_buffer)
+ return;
+
+ buf = dec->current_buffer;
+ dec->current_buffer = NULL;
+
+ vl_vb_unmap(&buf->vertex_stream, dec->base.context);
+
+ dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer);
+ dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer);
+
+ vb[0] = dec->quads;
+ vb[1] = dec->pos;
+
+ dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
+ for (i = 0; i < VL_MAX_PLANES; ++i) {
+ if (!dec->target_surfaces[i]) continue;
+
+ vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]);
+
+ for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
+ if (!dec->ref_frames[j][i]) continue;
+
+ vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);;
+ dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
+
+ vl_mc_render_ref(&buf->mc[i], dec->ref_frames[j][i]);
+ }
+ }
+
+ dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
+ for (i = 0; i < VL_MAX_PLANES; ++i) {
+ if (!buf->num_ycbcr_blocks[i]) continue;
+
+ vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
+ dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);
+
+ vl_zscan_render(&buf->zscan[i] , buf->num_ycbcr_blocks[i]);
+
+ if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+ vl_idct_flush(&buf->idct[i], buf->num_ycbcr_blocks[i]);
+ }
+
+ mc_source = (struct vl_video_buffer *)dec->mc_source;
+ mc_source_sv = mc_source->get_sampler_view_planes(&mc_source->base);
+ for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) {
+ if (!dec->target_surfaces[i]) continue;
+
+ nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format);
+ for (j = 0; j < nr_components; ++j, ++component) {
+ if (!buf->num_ycbcr_blocks[i]) continue;
+
+ vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
+ dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);
+
+ if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
+ vl_idct_prepare_stage2(&buf->idct[component]);
+ else {
+ dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
+ dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
+ }
+ vl_mc_render_ycbcr(&buf->mc[i], j, buf->num_ycbcr_blocks[component]);
+ }
+ }
+}
+
+static void
+vl_mpeg12_destroy_decode_buffer(struct vl_video_buffer *from, void *buffer)
+{
+ struct vl_mpeg12_buffer *buf = buffer;
+
+ assert(from && buf);
+
+ cleanup_zscan_buffer(buf);
+
+ if (buf->idct[0].idct)
+ cleanup_idct_buffer(buf);
+
+ cleanup_mc_buffer(buf);
+
+ vl_vb_cleanup(&buf->vertex_stream);
+
+ FREE(buf);
+}
+
+static void *
+vl_mpeg12_create_decode_buffer(struct vl_mpeg12_decoder *dec, struct vl_video_buffer *target)
+{
+ struct vl_mpeg12_buffer *buffer;
buffer = CALLOC_STRUCT(vl_mpeg12_buffer);
if (buffer == NULL)
@@ -452,9 +603,11 @@ vl_mpeg12_create_buffer(struct pipe_video_decoder *decoder)
goto error_zscan;
if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
- vl_mpg12_bs_init(&buffer->bs, decoder);
+ vl_mpg12_bs_init(&buffer->bs, &dec->base);
- return buffer;
+ target->decode_buffer = buffer;
+ target->destroy_decode_buffer = vl_mpeg12_destroy_decode_buffer;
+ return target->decode_buffer;
error_zscan:
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
@@ -468,40 +621,11 @@ error_mc:
error_vertex_buffer:
FREE(buffer);
+ dec->current_buffer = NULL;
return NULL;
}
static void
-vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
- struct vl_mpeg12_buffer *buf = buffer;
-
- assert(dec && buf);
-
- cleanup_zscan_buffer(buf);
-
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- cleanup_idct_buffer(buf);
-
- cleanup_mc_buffer(buf);
-
- vl_vb_cleanup(&buf->vertex_stream);
-
- FREE(buf);
-}
-
-static void
-vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
-
- assert(dec && buffer);
-
- dec->current_buffer = buffer;
-}
-
-static void
vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
struct pipe_picture_desc *picture)
{
@@ -542,75 +666,25 @@ vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
}
static void
-vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer *target)
+vl_mpeg12_set_decode_target(struct vl_mpeg12_decoder *dec,
+ struct vl_video_buffer *target)
{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
struct pipe_surface **surfaces;
unsigned i;
- assert(dec);
+ if (dec->current_buffer && dec->current_buffer == target->decode_buffer)
+ return;
+ assert(!dec->current_buffer);
- surfaces = target->get_surfaces(target);
+ surfaces = target->base.get_surfaces(&target->base);
for (i = 0; i < VL_MAX_PLANES; ++i)
pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]);
-}
-
-static void
-vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- struct vl_mpeg12_buffer *buf;
-
- struct pipe_resource *tex;
- struct pipe_box rect = { 0, 0, 0, 1, 1, 1 };
-
- unsigned i;
-
- assert(dec);
-
- buf = dec->current_buffer;
- assert(buf);
-
- if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
- dec->intra_matrix[0] = 1 << (7 - dec->picture_desc.intra_dc_precision);
-
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- vl_zscan_upload_quant(&buf->zscan[i], dec->intra_matrix, true);
- vl_zscan_upload_quant(&buf->zscan[i], dec->non_intra_matrix, false);
- }
-
- vl_vb_map(&buf->vertex_stream, dec->base.context);
-
- tex = buf->zscan_source->texture;
- rect.width = tex->width0;
- rect.height = tex->height0;
-
- buf->tex_transfer = dec->base.context->get_transfer
- (
- dec->base.context, tex,
- 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD,
- &rect
- );
-
- buf->block_num = 0;
- buf->texels = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer);
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
- buf->num_ycbcr_blocks[i] = 0;
- }
-
- for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
- buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
-
- if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
- vl_mpg12_bs_set_picture_desc(&buf->bs, &dec->picture_desc);
-
- } else {
-
- for (i = 0; i < VL_MAX_PLANES; ++i)
- vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
+ if (!target->decode_buffer)
+ vl_mpeg12_create_decode_buffer(dec, target);
+ if (!dec->current_buffer) {
+ dec->current_buffer = target->decode_buffer;
+ vl_mpeg12_begin_frame(dec);
}
}
@@ -626,13 +700,13 @@ vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
unsigned i, j, mv_weights[2];
- assert(dec && dec->current_buffer);
+ assert(dec);
assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
+ if (target)
+ vl_mpeg12_set_decode_target(dec, (struct vl_video_buffer*)target);
buf = dec->current_buffer;
assert(buf);
- if (target)
- vl_mpeg12_set_decode_target(&dec->base, target);
for (; num_macroblocks > 0; --num_macroblocks) {
unsigned mb_addr = mb->y * dec->width_in_macroblocks + mb->x;
@@ -694,8 +768,8 @@ vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
unsigned i;
- assert(dec && dec->current_buffer);
- vl_mpeg12_set_decode_target(&dec->base, target);
+ assert(dec);
+ vl_mpeg12_set_decode_target(dec, (struct vl_video_buffer*)target);
buf = dec->current_buffer;
assert(buf);
@@ -707,90 +781,6 @@ vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
vl_mpg12_bs_decode(&buf->bs, num_bytes, data);
}
-static void
-vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- struct vl_video_buffer *mc_source;
- struct pipe_sampler_view **mc_source_sv;
- struct pipe_vertex_buffer vb[3];
- struct vl_mpeg12_buffer *buf;
-
- unsigned i, j, component;
- unsigned nr_components;
-
- assert(dec && dec->current_buffer);
-
- buf = dec->current_buffer;
-
- vl_vb_unmap(&buf->vertex_stream, dec->base.context);
-
- dec->base.context->transfer_unmap(dec->base.context, buf->tex_transfer);
- dec->base.context->transfer_destroy(dec->base.context, buf->tex_transfer);
-
- vb[0] = dec->quads;
- vb[1] = dec->pos;
-
- dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_mv);
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- if (!dec->target_surfaces[i]) continue;
-
- vl_mc_set_surface(&buf->mc[i], dec->target_surfaces[i]);
-
- for (j = 0; j < VL_MAX_REF_FRAMES; ++j) {
- if (!dec->ref_frames[j][i]) continue;
-
- vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);;
- dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
-
- vl_mc_render_ref(&buf->mc[i], dec->ref_frames[j][i]);
- }
- }
-
- dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
- for (i = 0; i < VL_MAX_PLANES; ++i) {
- if (!buf->num_ycbcr_blocks[i]) continue;
-
- vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
- dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);
-
- vl_zscan_render(&buf->zscan[i] , buf->num_ycbcr_blocks[i]);
-
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- vl_idct_flush(&buf->idct[i], buf->num_ycbcr_blocks[i]);
- }
-
- mc_source = (struct vl_video_buffer *)dec->mc_source;
- mc_source_sv = mc_source->get_sampler_view_planes(&mc_source->base);
- for (i = 0, component = 0; i < VL_MAX_PLANES; ++i) {
- if (!dec->target_surfaces[i]) continue;
-
- nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format);
- for (j = 0; j < nr_components; ++j, ++component) {
- if (!buf->num_ycbcr_blocks[i]) continue;
-
- vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
- dec->base.context->set_vertex_buffers(dec->base.context, 2, vb);
-
- if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
- vl_idct_prepare_stage2(&buf->idct[component]);
- else {
- dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
- dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
- }
- vl_mc_render_ycbcr(&buf->mc[i], j, buf->num_ycbcr_blocks[component]);
- }
- }
-}
-
-static void
-vl_mpeg12_flush(struct pipe_video_decoder *decoder)
-{
- assert(decoder);
-
- //Noop, for shaders it is much faster to flush everything in end_frame
-}
-
static bool
init_pipe_state(struct vl_mpeg12_decoder *dec)
{
@@ -1057,14 +1047,9 @@ vl_create_mpeg12_decoder(struct pipe_context *context,
dec->base.max_references = max_references;
dec->base.destroy = vl_mpeg12_destroy;
- dec->base.create_buffer = vl_mpeg12_create_buffer;
- dec->base.destroy_buffer = vl_mpeg12_destroy_buffer;
- dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer;
dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters;
- dec->base.begin_frame = vl_mpeg12_begin_frame;
dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
- dec->base.end_frame = vl_mpeg12_end_frame;
dec->base.flush = vl_mpeg12_flush;
dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
diff --git a/src/gallium/auxiliary/vl/vl_video_buffer.c b/src/gallium/auxiliary/vl/vl_video_buffer.c
index b982477..9863fe9 100644
--- a/src/gallium/auxiliary/vl/vl_video_buffer.c
+++ b/src/gallium/auxiliary/vl/vl_video_buffer.c
@@ -106,6 +106,10 @@ vl_video_buffer_destroy(struct pipe_video_buffer *buffer)
unsigned i;
assert(buf);
+ if (buf->destroy_decode_buffer)
+ buf->destroy_decode_buffer(buf, buf->decode_buffer);
+ else
+ assert(!buf->decode_buffer);
for (i = 0; i < VL_MAX_PLANES; ++i) {
pipe_surface_reference(&buf->surfaces[i], NULL);
diff --git a/src/gallium/auxiliary/vl/vl_video_buffer.h b/src/gallium/auxiliary/vl/vl_video_buffer.h
index de64f69..f88df03 100644
--- a/src/gallium/auxiliary/vl/vl_video_buffer.h
+++ b/src/gallium/auxiliary/vl/vl_video_buffer.h
@@ -48,6 +48,8 @@ struct vl_video_buffer
struct pipe_sampler_view *sampler_view_planes[VL_MAX_PLANES];
struct pipe_sampler_view *sampler_view_components[VL_MAX_PLANES];
struct pipe_surface *surfaces[VL_MAX_PLANES];
+ void *decode_buffer;
+ void (*destroy_decode_buffer)(struct vl_video_buffer *,void*);
};
/**
diff --git a/src/gallium/drivers/nouveau/nouveau_video.c b/src/gallium/drivers/nouveau/nouveau_video.c
index 879f569..bf67d03 100644
--- a/src/gallium/drivers/nouveau/nouveau_video.c
+++ b/src/gallium/drivers/nouveau/nouveau_video.c
@@ -499,16 +499,6 @@ nouveau_decoder_destroy(struct pipe_video_decoder *decoder)
FREE(dec);
}
-static void
-nouveau_decoder_begin_frame(struct pipe_video_decoder *decoder)
-{
-}
-
-static void
-nouveau_decoder_end_frame(struct pipe_video_decoder *decoder)
-{
-}
-
static struct pipe_video_decoder *
nouveau_create_decoder(struct pipe_context *context,
struct nouveau_screen *screen,
@@ -559,8 +549,6 @@ nouveau_create_decoder(struct pipe_context *context,
dec->base.height = height;
dec->base.max_references = max_references;
dec->base.destroy = nouveau_decoder_destroy;
- dec->base.begin_frame = nouveau_decoder_begin_frame;
- dec->base.end_frame = nouveau_decoder_end_frame;
dec->base.set_picture_parameters = nouveau_decoder_set_picture_parameters;
dec->base.decode_macroblock = nouveau_decoder_decode_macroblock;
dec->base.flush = nouveau_decoder_flush;
diff --git a/src/gallium/include/pipe/p_video_decoder.h b/src/gallium/include/pipe/p_video_decoder.h
index 7596ead..3623042 100644
--- a/src/gallium/include/pipe/p_video_decoder.h
+++ b/src/gallium/include/pipe/p_video_decoder.h
@@ -60,21 +60,6 @@ struct pipe_video_decoder
void (*destroy)(struct pipe_video_decoder *decoder);
/**
- * Creates a decoder buffer
- */
- void *(*create_buffer)(struct pipe_video_decoder *decoder);
-
- /**
- * Destroys a decoder buffer
- */
- void (*destroy_buffer)(struct pipe_video_decoder *decoder, void *buffer);
-
- /**
- * set the current decoder buffer
- */
- void (*set_decode_buffer)(struct pipe_video_decoder *decoder, void *buffer);
-
- /**
* set the picture parameters for the next frame
* only used for bitstream decoding
*/
@@ -82,11 +67,6 @@ struct pipe_video_decoder
struct pipe_picture_desc *picture);
/**
- * start decoding of a new frame
- */
- void (*begin_frame)(struct pipe_video_decoder *decoder);
-
- /**
* decode a macroblock
*/
void (*decode_macroblock)(struct pipe_video_decoder *decoder,
@@ -102,11 +82,6 @@ struct pipe_video_decoder
unsigned num_bytes, const void *data);
/**
- * end decoding of the current frame
- */
- void (*end_frame)(struct pipe_video_decoder *decoder);
-
- /**
* flush any outstanding command buffers to the hardware
* should be called before a video_buffer is acessed by the state tracker again
*/
diff --git a/src/gallium/state_trackers/vdpau/decode.c b/src/gallium/state_trackers/vdpau/decode.c
index 4286e8c..cd8dc7a 100644
--- a/src/gallium/state_trackers/vdpau/decode.c
+++ b/src/gallium/state_trackers/vdpau/decode.c
@@ -48,7 +48,6 @@ vlVdpDecoderCreate(VdpDevice device,
vlVdpDevice *dev;
vlVdpDecoder *vldecoder;
VdpStatus ret;
- unsigned i;
bool supported;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Creating decoder\n");
@@ -103,19 +102,6 @@ vlVdpDecoderCreate(VdpDevice device,
pipe->screen, p_profile,
PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED
);
- vldecoder->cur_buffer = 0;
-
- vldecoder->buffers = CALLOC(vldecoder->num_buffers, sizeof(void*));
- if (!vldecoder->buffers)
- goto error_alloc_buffers;
-
- for (i = 0; i < vldecoder->num_buffers; ++i) {
- vldecoder->buffers[i] = vldecoder->decoder->create_buffer(vldecoder->decoder);
- if (!vldecoder->buffers[i]) {
- ret = VDP_STATUS_ERROR;
- goto error_create_buffers;
- }
- }
*decoder = vlAddDataHTAB(vldecoder);
if (*decoder == 0) {
@@ -128,16 +114,6 @@ vlVdpDecoderCreate(VdpDevice device,
return VDP_STATUS_OK;
error_handle:
-error_create_buffers:
-
- for (i = 0; i < vldecoder->num_buffers; ++i)
- if (vldecoder->buffers[i])
- vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffers[i]);
-
- FREE(vldecoder->buffers);
-
-error_alloc_buffers:
-
vldecoder->decoder->destroy(vldecoder->decoder);
error_decoder:
@@ -152,7 +128,6 @@ VdpStatus
vlVdpDecoderDestroy(VdpDecoder decoder)
{
vlVdpDecoder *vldecoder;
- unsigned i;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Destroying decoder\n");
@@ -160,12 +135,6 @@ vlVdpDecoderDestroy(VdpDecoder decoder)
if (!vldecoder)
return VDP_STATUS_INVALID_HANDLE;
- for (i = 0; i < vldecoder->num_buffers; ++i)
- if (vldecoder->buffers[i])
- vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffers[i]);
-
- FREE(vldecoder->buffers);
-
vldecoder->decoder->destroy(vldecoder->decoder);
FREE(vldecoder);
@@ -308,7 +277,6 @@ vlVdpDecoderRenderVC1(struct pipe_video_decoder *decoder,
VdpPictureInfoVC1 *picture_info)
{
struct pipe_vc1_picture_desc picture;
- struct pipe_video_buffer *ref_frames[2] = {};
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding VC-1\n");
@@ -400,11 +368,6 @@ vlVdpDecoderRender(VdpDecoder decoder,
// TODO: Recreate decoder with correct chroma
return VDP_STATUS_INVALID_CHROMA_TYPE;
- ++vldecoder->cur_buffer;
- vldecoder->cur_buffer %= vldecoder->num_buffers;
-
- dec->set_decode_buffer(dec, vldecoder->buffers[vldecoder->cur_buffer]);
-
switch (u_reduce_video_profile(dec->profile)) {
case PIPE_VIDEO_CODEC_MPEG12:
ret = vlVdpDecoderRenderMpeg12(dec, (VdpPictureInfoMPEG1Or2 *)picture_info);
@@ -421,11 +384,10 @@ vlVdpDecoderRender(VdpDecoder decoder,
if (ret != VDP_STATUS_OK)
return ret;
- dec->begin_frame(dec);
for (i = 0; i < bitstream_buffer_count; ++i)
dec->decode_bitstream(dec, vlsurf->video_buffer,
bitstream_buffers[i].bitstream_bytes,
bitstream_buffers[i].bitstream);
- dec->end_frame(dec);
+ dec->flush(dec);
return ret;
}
diff --git a/src/gallium/state_trackers/xorg/xvmc/surface.c b/src/gallium/state_trackers/xorg/xvmc/surface.c
index 3025f0e..c7aa301 100644
--- a/src/gallium/state_trackers/xorg/xvmc/surface.c
+++ b/src/gallium/state_trackers/xorg/xvmc/surface.c
@@ -111,9 +111,6 @@ SetDecoderStatus(XvMCSurfacePrivate *surface)
context_priv = surface->context->privData;
decoder = context_priv->decoder;
- if (surface->decode_buffer)
- decoder->set_decode_buffer(decoder, surface->decode_buffer);
-
if (surface->ref[0])
desc.ref_forward = ((XvMCSurfacePrivate*)surface->ref[0]->privData)->video_buffer;
if (surface->ref[1])
@@ -121,38 +118,6 @@ SetDecoderStatus(XvMCSurfacePrivate *surface)
decoder->set_picture_parameters(context_priv->decoder, &desc.base);
}
-static void
-EndFrame(XvMCSurfacePrivate *surface)
-{
- XvMCContextPrivate *context_priv;
- unsigned i;
-
- assert(surface);
-
- context_priv = surface->context->privData;
-
- for ( i = 0; i < 2; ++i ) {
- if (surface->ref[i]) {
- XvMCSurface *ref = surface->ref[i];
- XvMCSurfacePrivate *refpriv;
-
- assert(ref && ref->privData);
- refpriv = ref->privData;
- assert(!refpriv->picture_structure); // There's just no sane way this can happen..
- }
- }
-
- if (surface->picture_structure) {
- SetDecoderStatus(surface);
- surface->picture_structure = 0;
-
- for (i = 0; i < 2; ++i)
- surface->ref[i] = NULL;
-
- context_priv->decoder->end_frame(context_priv->decoder);
- }
-}
-
PUBLIC
Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surface)
{
@@ -176,8 +141,6 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
if (!surface_priv)
return BadAlloc;
- if (context_priv->decoder->create_buffer)
- surface_priv->decode_buffer = context_priv->decoder->create_buffer(context_priv->decoder);
surface_priv->video_buffer = pipe->create_video_buffer
(
pipe, PIPE_FORMAT_NV12, context_priv->decoder->chroma_format,
@@ -208,7 +171,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
)
{
struct pipe_mpeg12_macroblock mb[num_macroblocks];
- struct pipe_video_decoder *decoder;
XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
@@ -247,7 +209,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
assert(flags == 0 || flags == XVMC_SECOND_FIELD);
context_priv = context->privData;
- decoder = context_priv->decoder;
target_surface_priv = target_surface->privData;
past_surface_priv = past_surface ? past_surface->privData : NULL;
@@ -258,12 +219,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
assert(!future_surface || future_surface_priv->context == context);
// call end frame on all referenced frames
- if (past_surface)
- EndFrame(past_surface->privData);
-
- if (future_surface)
- EndFrame(future_surface->privData);
-
xvmc_mb = macroblocks->macro_blocks + first_macroblock;
/* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
@@ -274,7 +229,7 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
(xvmc_mb->x == 0 && xvmc_mb->y == 0))) {
// If they change anyway we must assume that the current frame is ended
- EndFrame(target_surface_priv);
+ context_priv->decoder->flush(context_priv->decoder);
}
target_surface_priv->ref[0] = past_surface;
@@ -285,7 +240,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
else {
target_surface_priv->picture_structure = picture_structure;
SetDecoderStatus(target_surface_priv);
- decoder->begin_frame(decoder);
}
MacroBlocksToPipe(context_priv, target_surface_priv, picture_structure,
@@ -355,6 +309,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
surface_priv = surface->privData;
context = surface_priv->context;
context_priv = context->privData;
+ context_priv->decoder->flush(context_priv->decoder);
assert(flags == XVMC_TOP_FIELD || flags == XVMC_BOTTOM_FIELD || flags == XVMC_FRAME_PICTURE);
assert(srcx + srcw - 1 < surface->width);
@@ -389,14 +344,6 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
assert(desty + desth - 1 < drawable_surface->height);
*/
- if (surface_priv->ref[0])
- EndFrame(surface_priv->ref[0]->privData);
- if (surface_priv->ref[1])
- EndFrame(surface_priv->ref[1]->privData);
- EndFrame(surface_priv);
-
- context_priv->decoder->flush(context_priv->decoder);
-
vl_compositor_clear_layers(compositor);
vl_compositor_set_buffer_layer(compositor, 0, surface_priv->video_buffer, &src_rect, NULL);
@@ -493,13 +440,7 @@ Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
surface_priv = surface->privData;
context_priv = surface_priv->context->privData;
-
- if (surface_priv->picture_structure) {
- SetDecoderStatus(surface_priv);
- context_priv->decoder->end_frame(context_priv->decoder);
- }
- if (surface_priv->decode_buffer)
- context_priv->decoder->destroy_buffer(context_priv->decoder, surface_priv->decode_buffer);
+ context_priv->decoder->flush(context_priv->decoder);
surface_priv->video_buffer->destroy(surface_priv->video_buffer);
FREE(surface_priv);
surface->privData = NULL;
diff --git a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
index 305e51f..642baa8 100644
--- a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
+++ b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
@@ -69,7 +69,6 @@ typedef struct
typedef struct
{
- void *decode_buffer;
struct pipe_video_buffer *video_buffer;
/* nonzero if this picture is already being decoded */
--
1.7.7.1
More information about the mesa-dev
mailing list