[Mesa-dev] [PATCH 5/6] vl: Remove most members of pipe_video_decoder
Maarten Lankhorst
m.b.lankhorst at gmail.com
Thu Dec 1 16:20:45 PST 2011
create_buffer, destroy_buffer and set_decode_buffer are an implementation detail of
vl_mpeg12_decoder and shouldn't be part of the API.
set_quant_matrix and set_reference_frames shouldn't be separate calls; that state belongs
in the picture parameters, which is a requirement for h264 to work.
set_decode_target and set_picture_parameters are dropped as well; the target buffer and
picture description are instead passed as arguments to decode_bitstream and
decode_macroblock. flush is used by XvMC to signal that the current frame has ended.
begin_frame and end_frame are moved into vl_mpeg12_decoder internally.
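With this, a state tracker drives the decoder roughly like the sketch below
(illustrative only; dec, target, past_buf, future_buf and the slice arrays are
placeholder names, not code from this series):

    struct pipe_mpeg12_picture_desc desc;

    memset(&desc, 0, sizeof(desc));
    desc.base.profile = dec->profile;
    desc.ref_forward = past_buf;        /* may be NULL */
    desc.ref_backward = future_buf;     /* may be NULL */
    desc.intra_matrix = intra_quantizer_matrix;
    desc.non_intra_matrix = non_intra_quantizer_matrix;

    /* target and picture state now travel with every decode call */
    dec->decode_bitstream(dec, target, &desc.base,
                          num_bufs, total_bytes, sizes, data);

    /* or, for XvMC-style macroblock decoding */
    dec->decode_macroblock(dec, target, &desc.base,
                           &mb[0].base, num_macroblocks);

    /* flush marks the end of the current frame */
    dec->flush(dec);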
Signed-off-by: Maarten Lankhorst <m.b.lankhorst at gmail.com>
---
src/gallium/auxiliary/vl/vl_decoder.c | 13 -
src/gallium/auxiliary/vl/vl_decoder.h | 6 -
src/gallium/auxiliary/vl/vl_idct.c | 14 +-
src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c | 44 ++--
src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h | 4 +-
src/gallium/auxiliary/vl/vl_mpeg12_decoder.c | 124 ++++-----
src/gallium/auxiliary/vl/vl_mpeg12_decoder.h | 3 +-
src/gallium/auxiliary/vl/vl_vlc.h | 18 +-
src/gallium/drivers/nouveau/nouveau_video.c | 57 +---
src/gallium/drivers/nvfx/nvfx_screen.c | 2 -
src/gallium/drivers/r300/r300_screen.c | 2 -
src/gallium/drivers/r600/r600_pipe.c | 2 -
src/gallium/drivers/softpipe/sp_screen.c | 2 -
src/gallium/include/pipe/p_video_decoder.h | 58 +----
src/gallium/include/pipe/p_video_enums.h | 3 +-
src/gallium/include/pipe/p_video_state.h | 15 +-
src/gallium/state_trackers/vdpau/decode.c | 295 ++++++++------------
src/gallium/state_trackers/vdpau/vdpau_private.h | 3 -
src/gallium/state_trackers/xorg/xvmc/surface.c | 109 +-------
.../state_trackers/xorg/xvmc/xvmc_private.h | 1 -
20 files changed, 251 insertions(+), 524 deletions(-)
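Note for reviewers (not part of the commit message): decode_bitstream now takes an
array of buffers, but vl_vlc_init still asserts on more than one buffer (see the TODO
in vl_vlc.h). A minimal sketch of how the new vl_mpg12_bs_decode interface is meant to
be fed under that assumption; the helper and its names are hypothetical:

    static void
    decode_one_slice_buffer(struct vl_mpg12_bs *bs,
                            const void *slice, unsigned slice_bytes)
    {
       const void *const buffers[1] = { slice };
       const unsigned lens[1] = { slice_bytes };

       /* n == 1 and total length == slice_bytes; vl_vlc_init
        * currently rejects anything else */
       vl_mpg12_bs_decode(bs, 1, slice_bytes, lens, buffers);
    }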
diff --git a/src/gallium/auxiliary/vl/vl_decoder.c b/src/gallium/auxiliary/vl/vl_decoder.c
index 383e02d..da905a6 100644
--- a/src/gallium/auxiliary/vl/vl_decoder.c
+++ b/src/gallium/auxiliary/vl/vl_decoder.c
@@ -44,19 +44,6 @@ vl_profile_supported(struct pipe_screen *screen, enum pipe_video_profile profile
}
}
-unsigned
-vl_num_buffers_desired(struct pipe_screen *screen, enum pipe_video_profile profile)
-{
- assert(screen);
- switch (u_reduce_video_profile(profile)) {
- case PIPE_VIDEO_CODEC_MPEG12:
- return 4;
-
- default:
- return 1;
- }
-}
-
struct pipe_video_decoder *
vl_create_decoder(struct pipe_context *pipe,
enum pipe_video_profile profile,
diff --git a/src/gallium/auxiliary/vl/vl_decoder.h b/src/gallium/auxiliary/vl/vl_decoder.h
index a997516..a7abe9c 100644
--- a/src/gallium/auxiliary/vl/vl_decoder.h
+++ b/src/gallium/auxiliary/vl/vl_decoder.h
@@ -38,12 +38,6 @@ bool
vl_profile_supported(struct pipe_screen *screen, enum pipe_video_profile profile);
/**
- * the desired number of buffers for optimal operation
- */
-unsigned
-vl_num_buffers_desired(struct pipe_screen *screen, enum pipe_video_profile profile);
-
-/**
* standard implementation of pipe->create_video_decoder
*/
struct pipe_video_decoder *
diff --git a/src/gallium/auxiliary/vl/vl_idct.c b/src/gallium/auxiliary/vl/vl_idct.c
index a2b3537..8394542 100644
--- a/src/gallium/auxiliary/vl/vl_idct.c
+++ b/src/gallium/auxiliary/vl/vl_idct.c
@@ -614,9 +614,9 @@ init_source(struct vl_idct *idct, struct vl_idct_buffer *buffer)
}
static void
-cleanup_source(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+cleanup_source(struct vl_idct_buffer *buffer)
{
- assert(idct && buffer);
+ assert(buffer);
pipe_surface_reference(&buffer->fb_state_mismatch.cbufs[0], NULL);
@@ -665,13 +665,13 @@ error_surfaces:
}
static void
-cleanup_intermediate(struct vl_idct *idct, struct vl_idct_buffer *buffer)
+cleanup_intermediate(struct vl_idct_buffer *buffer)
{
unsigned i;
- assert(idct && buffer);
+ assert(buffer);
- for(i = 0; i < idct->nr_of_render_targets; ++i)
+ for(i = 0; i < buffer->fb_state.nr_cbufs; ++i)
pipe_surface_reference(&buffer->fb_state.cbufs[i], NULL);
pipe_sampler_view_reference(&buffer->sampler_views.individual.intermediate, NULL);
@@ -823,8 +823,8 @@ vl_idct_cleanup_buffer(struct vl_idct_buffer *buffer)
{
assert(buffer);
- cleanup_source(buffer->idct, buffer);
- cleanup_intermediate(buffer->idct, buffer);
+ cleanup_source(buffer);
+ cleanup_intermediate(buffer);
pipe_sampler_view_reference(&buffer->sampler_views.individual.matrix, NULL);
pipe_sampler_view_reference(&buffer->sampler_views.individual.transpose, NULL);
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c
index 936cf2c..ddeaf31 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c
@@ -823,7 +823,7 @@ decode_slice(struct vl_mpg12_bs *bs)
inc += vl_vlc_get_vlclbf(&bs->vlc, tbl_B1, 11);
if (x != -1) {
mb.num_skipped_macroblocks = inc - 1;
- bs->decoder->decode_macroblock(bs->decoder, &mb.base, 1);
+ bs->decoder->decode_macroblock(bs->decoder, NULL, NULL, &mb.base, 1);
}
mb.x = x += inc;
@@ -927,7 +927,7 @@ decode_slice(struct vl_mpg12_bs *bs)
} while (vl_vlc_bits_left(&bs->vlc) && vl_vlc_peekbits(&bs->vlc, 23));
mb.num_skipped_macroblocks = 0;
- bs->decoder->decode_macroblock(bs->decoder, &mb.base, 1);
+ bs->decoder->decode_macroblock(bs->decoder, NULL, NULL, &mb.base, 1);
return true;
}
@@ -956,35 +956,27 @@ vl_mpg12_bs_set_picture_desc(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_
}
void
-vl_mpg12_bs_decode(struct vl_mpg12_bs *bs, unsigned num_bytes, const uint8_t *buffer)
+vl_mpg12_bs_decode(struct vl_mpg12_bs *bs,
+ unsigned n, unsigned len,
+ const unsigned *lens, const void* const*buffer)
{
assert(bs);
- assert(buffer && num_bytes);
+ assert(buffer && n && len);
- while(num_bytes > 2) {
- if (buffer[0] == 0x00 && buffer[1] == 0x00 && buffer[2] == 0x01 &&
- buffer[3] >= 0x01 && buffer[3] < 0xAF) {
- unsigned consumed;
-
- buffer += 3;
- num_bytes -= 3;
-
- vl_vlc_init(&bs->vlc, buffer, num_bytes);
-
- if (!decode_slice(bs))
- return;
-
- consumed = num_bytes - vl_vlc_bits_left(&bs->vlc) / 8;
-
- /* crap, this is a bug we have consumed more bytes than left in the buffer */
- assert(consumed <= num_bytes);
-
- num_bytes -= consumed;
- buffer += consumed;
+ vl_vlc_init(&bs->vlc, n, len, buffer, lens);
+ while (vl_vlc_bits_left(&bs->vlc) >= 32) {
+ if (vl_vlc_peekbits(&bs->vlc, 24) == 0x000001) {
+ vl_vlc_eatbits(&bs->vlc, 24);
+ if (vl_vlc_get_uimsbf(&bs->vlc, 8) > 0xaf)
+ continue;
} else {
- ++buffer;
- --num_bytes;
+ vl_vlc_eatbits(&bs->vlc, 8);
+ continue;
}
+
+ if (!decode_slice(bs))
+ return;
+ vl_vlc_bitalign(&bs->vlc);
}
}
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h
index c3f14a1..7a2b59f 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h
@@ -49,6 +49,8 @@ void
vl_mpg12_bs_set_picture_desc(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc *picture);
void
-vl_mpg12_bs_decode(struct vl_mpg12_bs *bs, unsigned num_bytes, const uint8_t *buffer);
+vl_mpg12_bs_decode(struct vl_mpg12_bs *bs,
+ unsigned n, unsigned len,
+ const unsigned *lens, const void* const*buffer);
#endif /* vl_mpeg12_bitstream_h */
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
index 2442d78..16f1129 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
@@ -48,6 +48,12 @@ struct format_config {
float mc_scale;
};
+static void
+vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer);
+
+static void
+vl_mpeg12_end_frame(struct pipe_video_decoder *decoder);
+
static const struct format_config bitstream_format_config[] = {
// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_FLOAT, 1.0f, SCALE_FACTOR_SSCALED },
// { PIPE_FORMAT_R16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R16G16B16A16_SSCALED, 1.0f, SCALE_FACTOR_SSCALED },
@@ -388,6 +394,7 @@ vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder*)decoder;
assert(decoder);
+ vl_mpeg12_destroy_buffer(decoder, dec->current_buffer);
/* Asserted in softpipe_delete_fs_state() for some reason */
dec->base.context->bind_vs_state(dec->base.context, NULL);
@@ -490,39 +497,36 @@ vl_mpeg12_destroy_buffer(struct pipe_video_decoder *decoder, void *buffer)
}
static void
-vl_mpeg12_set_decode_buffer(struct pipe_video_decoder *decoder, void *buffer)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
-
- assert(dec && buffer);
-
- dec->current_buffer = buffer;
-}
-
-static void
vl_mpeg12_set_picture_parameters(struct pipe_video_decoder *decoder,
struct pipe_picture_desc *picture)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
struct pipe_mpeg12_picture_desc *pic = (struct pipe_mpeg12_picture_desc *)picture;
+ struct pipe_sampler_view **sv;
+ unsigned j;
assert(dec && pic);
dec->picture_desc = *pic;
-}
-
-static void
-vl_mpeg12_set_quant_matrix(struct pipe_video_decoder *decoder,
- const struct pipe_quant_matrix *matrix)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- const struct pipe_mpeg12_quant_matrix *m = (const struct pipe_mpeg12_quant_matrix *)matrix;
-
- assert(dec);
- assert(matrix->codec == PIPE_VIDEO_CODEC_MPEG12);
+ if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+ memcpy(dec->intra_matrix, pic->intra_matrix, 64);
+ memcpy(dec->non_intra_matrix, pic->non_intra_matrix, 64);
+ }
+ if (pic->ref_forward) {
+ sv = pic->ref_forward->get_sampler_view_planes(pic->ref_forward);
+ for (j = 0; j < VL_MAX_PLANES; ++j)
+ pipe_sampler_view_reference(&dec->ref_frames[0][j], sv[j]);
+ } else
+ for (j = 0; j < VL_MAX_PLANES; ++j)
+ pipe_sampler_view_reference(&dec->ref_frames[0][j], NULL);
- memcpy(dec->intra_matrix, m->intra_matrix, 64);
- memcpy(dec->non_intra_matrix, m->non_intra_matrix, 64);
+ if (pic->ref_backward) {
+ sv = pic->ref_backward->get_sampler_view_planes(pic->ref_backward);
+ for (j = 0; j < VL_MAX_PLANES; ++j)
+ pipe_sampler_view_reference(&dec->ref_frames[1][j], sv[j]);
+ } else
+ for (j = 0; j < VL_MAX_PLANES; ++j)
+ pipe_sampler_view_reference(&dec->ref_frames[1][j], NULL);
}
static void
@@ -538,29 +542,7 @@ vl_mpeg12_set_decode_target(struct pipe_video_decoder *decoder,
surfaces = target->get_surfaces(target);
for (i = 0; i < VL_MAX_PLANES; ++i)
pipe_surface_reference(&dec->target_surfaces[i], surfaces[i]);
-}
-
-static void
-vl_mpeg12_set_reference_frames(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer **ref_frames,
- unsigned num_ref_frames)
-{
- struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
- struct pipe_sampler_view **sv;
- unsigned i,j;
-
- assert(dec);
- assert(num_ref_frames <= VL_MAX_REF_FRAMES);
-
- for (i = 0; i < num_ref_frames; ++i) {
- sv = ref_frames[i]->get_sampler_view_planes(ref_frames[i]);
- for (j = 0; j < VL_MAX_PLANES; ++j)
- pipe_sampler_view_reference(&dec->ref_frames[i][j], sv[j]);
- }
-
- for (; i < VL_MAX_REF_FRAMES; ++i)
- for (j = 0; j < VL_MAX_PLANES; ++j)
- pipe_sampler_view_reference(&dec->ref_frames[i][j], NULL);
+ dec->last_target = (struct vl_video_buffer*)target;
}
static void
@@ -619,19 +601,29 @@ vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
for (i = 0; i < VL_MAX_PLANES; ++i)
vl_zscan_set_layout(&buf->zscan[i], dec->zscan_linear);
}
+ dec->started = 1;
}
static void
vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
const struct pipe_macroblock *macroblocks,
unsigned num_macroblocks)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblocks;
struct vl_mpeg12_buffer *buf;
-
unsigned i, j, mv_weights[2];
+ if (picture)
+ vl_mpeg12_set_picture_parameters(decoder, picture);
+ if (target && (!dec->started || target != &dec->last_target->base)) {
+ vl_mpeg12_end_frame(decoder);
+ vl_mpeg12_set_decode_target(decoder, target);
+ vl_mpeg12_begin_frame(decoder);
+ }
+
assert(dec && dec->current_buffer);
assert(macroblocks && macroblocks->codec == PIPE_VIDEO_CODEC_MPEG12);
@@ -690,23 +682,30 @@ vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
static void
vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
- unsigned num_bytes, const void *data)
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
+ unsigned n, unsigned total_len,
+ const unsigned *lens, const void *const*data)
{
struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
struct vl_mpeg12_buffer *buf;
unsigned i;
+ vl_mpeg12_set_picture_parameters(decoder, picture);
+ vl_mpeg12_set_decode_target(decoder, target);
assert(dec && dec->current_buffer);
buf = dec->current_buffer;
assert(buf);
+ vl_mpeg12_begin_frame(decoder);
for (i = 0; i < VL_MAX_PLANES; ++i)
vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ?
dec->zscan_alternate : dec->zscan_normal);
- vl_mpg12_bs_decode(&buf->bs, num_bytes, data);
+ vl_mpg12_bs_decode(&buf->bs, n, total_len, lens, data);
+ vl_mpeg12_end_frame(decoder);
}
static void
@@ -716,11 +715,13 @@ vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
struct pipe_sampler_view **mc_source_sv;
struct pipe_vertex_buffer vb[3];
struct vl_mpeg12_buffer *buf;
-
unsigned i, j, component;
unsigned nr_components;
assert(dec && dec->current_buffer);
+ if (!dec->started)
+ return;
+ dec->started = 0;
buf = dec->current_buffer;
@@ -783,14 +784,6 @@ vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
}
}
-static void
-vl_mpeg12_flush(struct pipe_video_decoder *decoder)
-{
- assert(decoder);
-
- //Noop, for shaders it is much faster to flush everything in end_frame
-}
-
static bool
init_pipe_state(struct vl_mpeg12_decoder *dec)
{
@@ -1057,18 +1050,9 @@ vl_create_mpeg12_decoder(struct pipe_context *context,
dec->base.max_references = max_references;
dec->base.destroy = vl_mpeg12_destroy;
- dec->base.create_buffer = vl_mpeg12_create_buffer;
- dec->base.destroy_buffer = vl_mpeg12_destroy_buffer;
- dec->base.set_decode_buffer = vl_mpeg12_set_decode_buffer;
- dec->base.set_picture_parameters = vl_mpeg12_set_picture_parameters;
- dec->base.set_quant_matrix = vl_mpeg12_set_quant_matrix;
- dec->base.set_decode_target = vl_mpeg12_set_decode_target;
- dec->base.set_reference_frames = vl_mpeg12_set_reference_frames;
- dec->base.begin_frame = vl_mpeg12_begin_frame;
dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
- dec->base.end_frame = vl_mpeg12_end_frame;
- dec->base.flush = vl_mpeg12_flush;
+ dec->base.flush = vl_mpeg12_end_frame;
dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
@@ -1152,7 +1136,9 @@ vl_create_mpeg12_decoder(struct pipe_context *context,
memset(dec->intra_matrix, 0x10, 64);
memset(dec->non_intra_matrix, 0x10, 64);
-
+ dec->current_buffer = vl_mpeg12_create_buffer(&dec->base);
+ if (!dec->current_buffer)
+ goto error_pipe_state;
return &dec->base;
error_pipe_state:
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
index 817c1ff..0c81417 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
@@ -49,13 +49,14 @@ struct vl_mpeg12_decoder
unsigned blocks_per_line;
unsigned num_blocks;
- unsigned width_in_macroblocks;
+ unsigned width_in_macroblocks, started;
enum pipe_format zscan_source_format;
struct pipe_vertex_buffer quads;
struct pipe_vertex_buffer pos;
+ struct vl_video_buffer *last_target;
void *ves_ycbcr;
void *ves_mv;
diff --git a/src/gallium/auxiliary/vl/vl_vlc.h b/src/gallium/auxiliary/vl/vl_vlc.h
index dc4faed..9cccfa4 100644
--- a/src/gallium/auxiliary/vl/vl_vlc.h
+++ b/src/gallium/auxiliary/vl/vl_vlc.h
@@ -90,10 +90,14 @@ vl_vlc_fillbits(struct vl_vlc *vlc)
}
static INLINE void
-vl_vlc_init(struct vl_vlc *vlc, const uint8_t *data, unsigned len)
+vl_vlc_init(struct vl_vlc *vlc,
+ const unsigned array_size, unsigned total_len,
+ const void *const *datas, const unsigned *lens)
{
+ const uint8_t *data = datas[0];
assert(vlc);
- assert(data && len);
+ assert(array_size == 1); // TODO
+ assert(datas[0] && lens[0]);
vlc->buffer = 0;
vlc->valid_bits = 0;
@@ -102,11 +106,11 @@ vl_vlc_init(struct vl_vlc *vlc, const uint8_t *data, unsigned len)
while (pointer_to_uintptr(data) & 3) {
vlc->buffer |= (uint64_t)*data << (56 - vlc->valid_bits);
++data;
- --len;
+ --total_len;
vlc->valid_bits += 8;
}
vlc->data = (uint32_t*)data;
- vlc->end = (uint32_t*)(data + len);
+ vlc->end = (uint32_t*)(data + total_len);
vl_vlc_fillbits(vlc);
vl_vlc_fillbits(vlc);
@@ -170,4 +174,10 @@ vl_vlc_get_vlclbf(struct vl_vlc *vlc, const struct vl_vlc_entry *tbl, unsigned n
return tbl->value;
}
+static INLINE void
+vl_vlc_bitalign(struct vl_vlc *vlc)
+{
+ vl_vlc_eatbits(vlc, vlc->valid_bits & 7);
+}
+
#endif /* vl_vlc_h */
diff --git a/src/gallium/drivers/nouveau/nouveau_video.c b/src/gallium/drivers/nouveau/nouveau_video.c
index 1144be2..f2d5590 100644
--- a/src/gallium/drivers/nouveau/nouveau_video.c
+++ b/src/gallium/drivers/nouveau/nouveau_video.c
@@ -424,44 +424,24 @@ nouveau_decoder_surface_index(struct nouveau_decoder *dec,
}
static void
-nouveau_decoder_set_picture_parameters(struct pipe_video_decoder *decoder,
- struct pipe_picture_desc *picture_desc)
-{
- struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
- struct pipe_mpeg12_picture_desc *desc;
- desc = (struct pipe_mpeg12_picture_desc *)picture_desc;
- dec->picture_structure = desc->picture_structure;
-}
-
-static void
-nouveau_decoder_set_reference_frames(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer **buffers,
- unsigned count)
-{
- struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
- if (count >= 1 && buffers[0])
- dec->past = nouveau_decoder_surface_index(dec, buffers[0]);
- if (count >= 2 && buffers[1])
- dec->future = nouveau_decoder_surface_index(dec, buffers[1]);
-}
-
-static void
-nouveau_decoder_set_decode_target(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer *buffer)
-{
- struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
- dec->current = nouveau_decoder_surface_index(dec, buffer);
-}
-
-static void
nouveau_decoder_decode_macroblock(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
const struct pipe_macroblock *pipe_mb,
unsigned num_macroblocks)
{
struct nouveau_decoder *dec = (struct nouveau_decoder *)decoder;
+ struct pipe_mpeg12_picture_desc *desc = (struct pipe_mpeg12_picture_desc*)picture;
const struct pipe_mpeg12_macroblock *mb;
unsigned i;
+
+ dec->current = nouveau_decoder_surface_index(dec, target);
assert(dec->current < 8);
+ dec->picture_structure = desc->picture_structure;
+ if (desc->ref_backward)
+ dec->past = nouveau_decoder_surface_index(dec, desc->ref_backward);
+ if (desc->ref_forward)
+ dec->future = nouveau_decoder_surface_index(dec, desc->ref_forward);
if (nouveau_vpe_init(dec)) return;
mb = (const struct pipe_mpeg12_macroblock *)pipe_mb;
@@ -511,16 +491,6 @@ nouveau_decoder_destroy(struct pipe_video_decoder *decoder)
FREE(dec);
}
-static void
-nouveau_decoder_begin_frame(struct pipe_video_decoder *decoder)
-{
-}
-
-static void
-nouveau_decoder_end_frame(struct pipe_video_decoder *decoder)
-{
-}
-
static struct pipe_video_decoder *
nouveau_create_decoder(struct pipe_context *context,
struct nouveau_screen *screen,
@@ -571,11 +541,6 @@ nouveau_create_decoder(struct pipe_context *context,
dec->base.height = height;
dec->base.max_references = max_references;
dec->base.destroy = nouveau_decoder_destroy;
- dec->base.begin_frame = nouveau_decoder_begin_frame;
- dec->base.end_frame = nouveau_decoder_end_frame;
- dec->base.set_decode_target = nouveau_decoder_set_decode_target;
- dec->base.set_picture_parameters = nouveau_decoder_set_picture_parameters;
- dec->base.set_reference_frames = nouveau_decoder_set_reference_frames;
dec->base.decode_macroblock = nouveau_decoder_decode_macroblock;
dec->base.flush = nouveau_decoder_flush;
dec->screen = screen;
@@ -814,8 +779,6 @@ nouveau_screen_get_video_param(struct pipe_screen *pscreen,
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(pscreen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(pscreen, profile);
default:
debug_printf("unknown video param: %d\n", param);
return 0;
diff --git a/src/gallium/drivers/nvfx/nvfx_screen.c b/src/gallium/drivers/nvfx/nvfx_screen.c
index f56c697..a069771 100644
--- a/src/gallium/drivers/nvfx/nvfx_screen.c
+++ b/src/gallium/drivers/nvfx/nvfx_screen.c
@@ -222,8 +222,6 @@ nvfx_screen_get_video_param(struct pipe_screen *screen,
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
diff --git a/src/gallium/drivers/r300/r300_screen.c b/src/gallium/drivers/r300/r300_screen.c
index e734ff2..ceeb365 100644
--- a/src/gallium/drivers/r300/r300_screen.c
+++ b/src/gallium/drivers/r300/r300_screen.c
@@ -302,8 +302,6 @@ static int r300_get_video_param(struct pipe_screen *screen,
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c
index ddea167..1ea144d 100644
--- a/src/gallium/drivers/r600/r600_pipe.c
+++ b/src/gallium/drivers/r600/r600_pipe.c
@@ -532,8 +532,6 @@ static int r600_get_video_param(struct pipe_screen *screen,
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
diff --git a/src/gallium/drivers/softpipe/sp_screen.c b/src/gallium/drivers/softpipe/sp_screen.c
index 7a09be7..23f1c4d 100644
--- a/src/gallium/drivers/softpipe/sp_screen.c
+++ b/src/gallium/drivers/softpipe/sp_screen.c
@@ -188,8 +188,6 @@ softpipe_get_video_param(struct pipe_screen *screen,
case PIPE_VIDEO_CAP_MAX_WIDTH:
case PIPE_VIDEO_CAP_MAX_HEIGHT:
return vl_video_buffer_max_size(screen);
- case PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED:
- return vl_num_buffers_desired(screen, profile);
default:
return 0;
}
diff --git a/src/gallium/include/pipe/p_video_decoder.h b/src/gallium/include/pipe/p_video_decoder.h
index 034b860..6c940b7 100644
--- a/src/gallium/include/pipe/p_video_decoder.h
+++ b/src/gallium/include/pipe/p_video_decoder.h
@@ -60,55 +60,11 @@ struct pipe_video_decoder
void (*destroy)(struct pipe_video_decoder *decoder);
/**
- * Creates a decoder buffer
- */
- void *(*create_buffer)(struct pipe_video_decoder *decoder);
-
- /**
- * Destroys a decoder buffer
- */
- void (*destroy_buffer)(struct pipe_video_decoder *decoder, void *buffer);
-
- /**
- * set the current decoder buffer
- */
- void (*set_decode_buffer)(struct pipe_video_decoder *decoder, void *buffer);
-
- /**
- * set the picture parameters for the next frame
- * only used for bitstream decoding
- */
- void (*set_picture_parameters)(struct pipe_video_decoder *decoder,
- struct pipe_picture_desc *picture);
-
- /**
- * set the quantification matrixes
- */
- void (*set_quant_matrix)(struct pipe_video_decoder *decoder,
- const struct pipe_quant_matrix *matrix);
-
- /**
- * set target where video data is decoded to
- */
- void (*set_decode_target)(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer *target);
-
- /**
- * set reference frames for motion compensation
- */
- void (*set_reference_frames)(struct pipe_video_decoder *decoder,
- struct pipe_video_buffer **ref_frames,
- unsigned num_ref_frames);
-
- /**
- * start decoding of a new frame
- */
- void (*begin_frame)(struct pipe_video_decoder *decoder);
-
- /**
* decode a macroblock
*/
void (*decode_macroblock)(struct pipe_video_decoder *decoder,
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
const struct pipe_macroblock *macroblocks,
unsigned num_macroblocks);
@@ -116,12 +72,10 @@ struct pipe_video_decoder
* decode a bitstream
*/
void (*decode_bitstream)(struct pipe_video_decoder *decoder,
- unsigned num_bytes, const void *data);
-
- /**
- * end decoding of the current frame
- */
- void (*end_frame)(struct pipe_video_decoder *decoder);
+ struct pipe_video_buffer *target,
+ struct pipe_picture_desc *picture,
+ unsigned num_buffers, unsigned total_bytes,
+ unsigned const *num_bytes, const void *const *data);
/**
* flush any outstanding command buffers to the hardware
diff --git a/src/gallium/include/pipe/p_video_enums.h b/src/gallium/include/pipe/p_video_enums.h
index ea25a25..1378606 100644
--- a/src/gallium/include/pipe/p_video_enums.h
+++ b/src/gallium/include/pipe/p_video_enums.h
@@ -50,8 +50,7 @@ enum pipe_video_cap
PIPE_VIDEO_CAP_SUPPORTED = 0,
PIPE_VIDEO_CAP_NPOT_TEXTURES = 1,
PIPE_VIDEO_CAP_MAX_WIDTH = 2,
- PIPE_VIDEO_CAP_MAX_HEIGHT = 3,
- PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED = 4
+ PIPE_VIDEO_CAP_MAX_HEIGHT = 3
};
enum pipe_video_codec
diff --git a/src/gallium/include/pipe/p_video_state.h b/src/gallium/include/pipe/p_video_state.h
index 9a70eb7..0650b19 100644
--- a/src/gallium/include/pipe/p_video_state.h
+++ b/src/gallium/include/pipe/p_video_state.h
@@ -124,6 +124,7 @@ struct pipe_macroblock
struct pipe_mpeg12_picture_desc
{
struct pipe_picture_desc base;
+ struct pipe_video_buffer *ref_forward, *ref_backward;
unsigned picture_coding_type;
unsigned picture_structure;
@@ -138,12 +139,6 @@ struct pipe_mpeg12_picture_desc
unsigned full_pel_forward_vector;
unsigned full_pel_backward_vector;
unsigned num_slices;
-};
-
-struct pipe_mpeg12_quant_matrix
-{
- struct pipe_quant_matrix base;
-
const uint8_t *intra_matrix;
const uint8_t *non_intra_matrix;
};
@@ -191,6 +186,7 @@ struct pipe_mpeg12_macroblock
struct pipe_mpeg4_picture_desc
{
struct pipe_picture_desc base;
+ struct pipe_video_buffer *ref_forward, *ref_backward;
int32_t trd[2];
int32_t trb[2];
uint16_t vop_time_increment_resolution;
@@ -205,12 +201,6 @@ struct pipe_mpeg4_picture_desc
uint8_t rounding_control;
uint8_t alternate_vertical_scan_flag;
uint8_t top_field_first;
-};
-
-struct pipe_mpeg4_quant_matrix
-{
- struct pipe_quant_matrix base;
-
const uint8_t *intra_matrix;
const uint8_t *non_intra_matrix;
};
@@ -218,6 +208,7 @@ struct pipe_mpeg4_quant_matrix
struct pipe_vc1_picture_desc
{
struct pipe_picture_desc base;
+ struct pipe_video_buffer *ref_forward, *ref_backward;
uint32_t slice_count;
uint8_t picture_type;
uint8_t frame_coding_mode;
diff --git a/src/gallium/state_trackers/vdpau/decode.c b/src/gallium/state_trackers/vdpau/decode.c
index 47212e3..45009ef 100644
--- a/src/gallium/state_trackers/vdpau/decode.c
+++ b/src/gallium/state_trackers/vdpau/decode.c
@@ -48,7 +48,6 @@ vlVdpDecoderCreate(VdpDevice device,
vlVdpDevice *dev;
vlVdpDecoder *vldecoder;
VdpStatus ret;
- unsigned i;
bool supported;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Creating decoder\n");
@@ -98,25 +97,6 @@ vlVdpDecoderCreate(VdpDevice device,
goto error_decoder;
}
- vldecoder->num_buffers = pipe->screen->get_video_param
- (
- pipe->screen, p_profile,
- PIPE_VIDEO_CAP_NUM_BUFFERS_DESIRED
- );
- vldecoder->cur_buffer = 0;
-
- vldecoder->buffers = CALLOC(vldecoder->num_buffers, sizeof(void*));
- if (!vldecoder->buffers)
- goto error_alloc_buffers;
-
- for (i = 0; i < vldecoder->num_buffers; ++i) {
- vldecoder->buffers[i] = vldecoder->decoder->create_buffer(vldecoder->decoder);
- if (!vldecoder->buffers[i]) {
- ret = VDP_STATUS_ERROR;
- goto error_create_buffers;
- }
- }
-
*decoder = vlAddDataHTAB(vldecoder);
if (*decoder == 0) {
ret = VDP_STATUS_ERROR;
@@ -128,16 +108,6 @@ vlVdpDecoderCreate(VdpDevice device,
return VDP_STATUS_OK;
error_handle:
-error_create_buffers:
-
- for (i = 0; i < vldecoder->num_buffers; ++i)
- if (vldecoder->buffers[i])
- vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffers[i]);
-
- FREE(vldecoder->buffers);
-
-error_alloc_buffers:
-
vldecoder->decoder->destroy(vldecoder->decoder);
error_decoder:
@@ -152,7 +122,6 @@ VdpStatus
vlVdpDecoderDestroy(VdpDecoder decoder)
{
vlVdpDecoder *vldecoder;
- unsigned i;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Destroying decoder\n");
@@ -160,12 +129,6 @@ vlVdpDecoderDestroy(VdpDecoder decoder)
if (!vldecoder)
return VDP_STATUS_INVALID_HANDLE;
- for (i = 0; i < vldecoder->num_buffers; ++i)
- if (vldecoder->buffers[i])
- vldecoder->decoder->destroy_buffer(vldecoder->decoder, vldecoder->buffers[i]);
-
- FREE(vldecoder->buffers);
-
vldecoder->decoder->destroy(vldecoder->decoder);
FREE(vldecoder);
@@ -201,62 +164,46 @@ vlVdpDecoderGetParameters(VdpDecoder decoder,
* Decode a mpeg 1/2 video.
*/
static VdpStatus
-vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder,
+vlVdpDecoderRenderMpeg12(struct pipe_mpeg12_picture_desc *picture,
VdpPictureInfoMPEG1Or2 *picture_info)
{
- struct pipe_mpeg12_picture_desc picture;
- struct pipe_mpeg12_quant_matrix quant;
- struct pipe_video_buffer *ref_frames[2];
- unsigned i;
-
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG12\n");
- i = 0;
-
/* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */
if (picture_info->forward_reference != VDP_INVALID_HANDLE) {
- ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
- if (!ref_frames[i])
+ picture->ref_forward = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
+ if (!picture->ref_forward)
return VDP_STATUS_INVALID_HANDLE;
- ++i;
}
+ else
+ picture->ref_forward = NULL;
if (picture_info->backward_reference != VDP_INVALID_HANDLE) {
- ref_frames[i] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
- if (!ref_frames[i])
+ picture->ref_backward = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
+ if (!picture->ref_backward)
return VDP_STATUS_INVALID_HANDLE;
- ++i;
}
-
- decoder->set_reference_frames(decoder, ref_frames, i);
-
- memset(&picture, 0, sizeof(picture));
- picture.base.profile = decoder->profile;
- picture.picture_coding_type = picture_info->picture_coding_type;
- picture.picture_structure = picture_info->picture_structure;
- picture.frame_pred_frame_dct = picture_info->frame_pred_frame_dct;
- picture.q_scale_type = picture_info->q_scale_type;
- picture.alternate_scan = picture_info->alternate_scan;
- picture.intra_vlc_format = picture_info->intra_vlc_format;
- picture.concealment_motion_vectors = picture_info->concealment_motion_vectors;
- picture.intra_dc_precision = picture_info->intra_dc_precision;
- picture.f_code[0][0] = picture_info->f_code[0][0] - 1;
- picture.f_code[0][1] = picture_info->f_code[0][1] - 1;
- picture.f_code[1][0] = picture_info->f_code[1][0] - 1;
- picture.f_code[1][1] = picture_info->f_code[1][1] - 1;
- picture.num_slices = picture_info->slice_count;
- picture.top_field_first = picture_info->top_field_first;
- picture.full_pel_forward_vector = picture_info->full_pel_forward_vector;
- picture.full_pel_backward_vector = picture_info->full_pel_backward_vector;
-
- decoder->set_picture_parameters(decoder, &picture.base);
-
- memset(&quant, 0, sizeof(quant));
- quant.base.codec = PIPE_VIDEO_CODEC_MPEG12;
- quant.intra_matrix = picture_info->intra_quantizer_matrix;
- quant.non_intra_matrix = picture_info->non_intra_quantizer_matrix;
-
- decoder->set_quant_matrix(decoder, &quant.base);
+ else
+ picture->ref_backward = NULL;
+
+ picture->picture_coding_type = picture_info->picture_coding_type;
+ picture->picture_structure = picture_info->picture_structure;
+ picture->frame_pred_frame_dct = picture_info->frame_pred_frame_dct;
+ picture->q_scale_type = picture_info->q_scale_type;
+ picture->alternate_scan = picture_info->alternate_scan;
+ picture->intra_vlc_format = picture_info->intra_vlc_format;
+ picture->concealment_motion_vectors = picture_info->concealment_motion_vectors;
+ picture->intra_dc_precision = picture_info->intra_dc_precision;
+ picture->f_code[0][0] = picture_info->f_code[0][0] - 1;
+ picture->f_code[0][1] = picture_info->f_code[0][1] - 1;
+ picture->f_code[1][0] = picture_info->f_code[1][0] - 1;
+ picture->f_code[1][1] = picture_info->f_code[1][1] - 1;
+ picture->num_slices = picture_info->slice_count;
+ picture->top_field_first = picture_info->top_field_first;
+ picture->full_pel_forward_vector = picture_info->full_pel_forward_vector;
+ picture->full_pel_backward_vector = picture_info->full_pel_backward_vector;
+ picture->intra_matrix = picture_info->intra_quantizer_matrix;
+ picture->non_intra_matrix = picture_info->non_intra_quantizer_matrix;
return VDP_STATUS_OK;
}
@@ -264,114 +211,101 @@ vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder,
* Decode a mpeg 4 video.
*/
static VdpStatus
-vlVdpDecoderRenderMpeg4(struct pipe_video_decoder *decoder,
- VdpPictureInfoMPEG4Part2 *picture_info)
+vlVdpDecoderRenderMpeg4(struct pipe_mpeg4_picture_desc *picture,
+ VdpPictureInfoMPEG4Part2 *picture_info)
{
- struct pipe_mpeg4_picture_desc picture;
- struct pipe_mpeg4_quant_matrix quant;
- struct pipe_video_buffer *ref_frames[2] = {};
unsigned i;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG4\n");
- /* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */
- if (picture_info->forward_reference != VDP_INVALID_HANDLE) {
- ref_frames[0] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
- if (!ref_frames[0])
+ if (picture_info->forward_reference != VDP_INVALID_HANDLE) {
+ picture->ref_forward = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
+ if (!picture->ref_forward)
return VDP_STATUS_INVALID_HANDLE;
}
+ else
+ picture->ref_forward = NULL;
- if (picture_info->backward_reference != VDP_INVALID_HANDLE) {
- ref_frames[1] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
- if (!ref_frames[1])
+ if (picture_info->backward_reference != VDP_INVALID_HANDLE) {
+ picture->ref_backward = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
+ if (!picture->ref_backward)
return VDP_STATUS_INVALID_HANDLE;
}
- decoder->set_reference_frames(decoder, ref_frames, 2);
+ else
+ picture->ref_backward = NULL;
- memset(&picture, 0, sizeof(picture));
- picture.base.profile = decoder->profile;
for (i = 0; i < 2; ++i) {
- picture.trd[i] = picture_info->trd[i];
- picture.trb[i] = picture_info->trb[i];
+ picture->trd[i] = picture_info->trd[i];
+ picture->trb[i] = picture_info->trb[i];
}
- picture.vop_time_increment_resolution = picture_info->vop_time_increment_resolution;
- picture.vop_coding_type = picture_info->vop_coding_type;
- picture.vop_fcode_forward = picture_info->vop_fcode_forward;
- picture.vop_fcode_backward = picture_info->vop_fcode_backward;
- picture.resync_marker_disable = picture_info->resync_marker_disable;
- picture.interlaced = picture_info->interlaced;
- picture.quant_type = picture_info->quant_type;
- picture.quarter_sample = picture_info->quarter_sample;
- picture.short_video_header = picture_info->short_video_header;
- picture.rounding_control = picture_info->rounding_control;
- picture.alternate_vertical_scan_flag = picture_info->alternate_vertical_scan_flag;
- picture.top_field_first = picture_info->top_field_first;
- decoder->set_picture_parameters(decoder, &picture.base);
-
- memset(&quant, 0, sizeof(quant));
- quant.base.codec = PIPE_VIDEO_CODEC_MPEG4;
- quant.intra_matrix = picture_info->intra_quantizer_matrix;
- quant.non_intra_matrix = picture_info->non_intra_quantizer_matrix;
- decoder->set_quant_matrix(decoder, &quant.base);
+ picture->vop_time_increment_resolution = picture_info->vop_time_increment_resolution;
+ picture->vop_coding_type = picture_info->vop_coding_type;
+ picture->vop_fcode_forward = picture_info->vop_fcode_forward;
+ picture->vop_fcode_backward = picture_info->vop_fcode_backward;
+ picture->resync_marker_disable = picture_info->resync_marker_disable;
+ picture->interlaced = picture_info->interlaced;
+ picture->quant_type = picture_info->quant_type;
+ picture->quarter_sample = picture_info->quarter_sample;
+ picture->short_video_header = picture_info->short_video_header;
+ picture->rounding_control = picture_info->rounding_control;
+ picture->alternate_vertical_scan_flag = picture_info->alternate_vertical_scan_flag;
+ picture->top_field_first = picture_info->top_field_first;
+ picture->intra_matrix = picture_info->intra_quantizer_matrix;
+ picture->non_intra_matrix = picture_info->non_intra_quantizer_matrix;
return VDP_STATUS_OK;
}
static VdpStatus
-vlVdpDecoderRenderVC1(struct pipe_video_decoder *decoder,
+vlVdpDecoderRenderVC1(struct pipe_vc1_picture_desc *picture,
VdpPictureInfoVC1 *picture_info)
{
- struct pipe_vc1_picture_desc picture;
- struct pipe_video_buffer *ref_frames[2] = {};
- unsigned i;
-
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding VC-1\n");
- /* if surfaces equals VDP_STATUS_INVALID_HANDLE, they are not used */
if (picture_info->forward_reference != VDP_INVALID_HANDLE) {
- ref_frames[0] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
- if (!ref_frames[0])
+ picture->ref_forward = ((vlVdpSurface *)vlGetDataHTAB(picture_info->forward_reference))->video_buffer;
+ if (!picture->ref_forward)
return VDP_STATUS_INVALID_HANDLE;
}
+ else
+ picture->ref_forward = NULL;
if (picture_info->backward_reference != VDP_INVALID_HANDLE) {
- ref_frames[1] = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
- if (!ref_frames[1])
+ picture->ref_backward = ((vlVdpSurface *)vlGetDataHTAB(picture_info->backward_reference))->video_buffer;
+ if (!picture->ref_backward)
return VDP_STATUS_INVALID_HANDLE;
}
- decoder->set_reference_frames(decoder, ref_frames, 2);
-
- memset(&picture, 0, sizeof(picture));
- picture.base.profile = decoder->profile;
- picture.slice_count = picture_info->slice_count;
- picture.picture_type = picture_info->picture_type;
- picture.frame_coding_mode = picture_info->frame_coding_mode;
- picture.postprocflag = picture_info->postprocflag;
- picture.pulldown = picture_info->pulldown;
- picture.interlace = picture_info->interlace;
- picture.tfcntrflag = picture_info->tfcntrflag;
- picture.finterpflag = picture_info->finterpflag;
- picture.psf = picture_info->psf;
- picture.dquant = picture_info->dquant;
- picture.panscan_flag = picture_info->panscan_flag;
- picture.refdist_flag = picture_info->refdist_flag;
- picture.quantizer = picture_info->quantizer;
- picture.extended_mv = picture_info->extended_mv;
- picture.extended_dmv = picture_info->extended_dmv;
- picture.overlap = picture_info->overlap;
- picture.vstransform = picture_info->vstransform;
- picture.loopfilter = picture_info->loopfilter;
- picture.fastuvmc = picture_info->fastuvmc;
- picture.range_mapy_flag = picture_info->range_mapy_flag;
- picture.range_mapy = picture_info->range_mapy;
- picture.range_mapuv_flag = picture_info->range_mapuv_flag;
- picture.range_mapuv = picture_info->range_mapuv;
- picture.multires = picture_info->multires;
- picture.syncmarker = picture_info->syncmarker;
- picture.rangered = picture_info->rangered;
- picture.maxbframes = picture_info->maxbframes;
- picture.deblockEnable = picture_info->deblockEnable;
- picture.pquant = picture_info->pquant;
- decoder->set_picture_parameters(decoder, &picture.base);
+ else
+ picture->ref_backward = NULL;
+
+ picture->slice_count = picture_info->slice_count;
+ picture->picture_type = picture_info->picture_type;
+ picture->frame_coding_mode = picture_info->frame_coding_mode;
+ picture->postprocflag = picture_info->postprocflag;
+ picture->pulldown = picture_info->pulldown;
+ picture->interlace = picture_info->interlace;
+ picture->tfcntrflag = picture_info->tfcntrflag;
+ picture->finterpflag = picture_info->finterpflag;
+ picture->psf = picture_info->psf;
+ picture->dquant = picture_info->dquant;
+ picture->panscan_flag = picture_info->panscan_flag;
+ picture->refdist_flag = picture_info->refdist_flag;
+ picture->quantizer = picture_info->quantizer;
+ picture->extended_mv = picture_info->extended_mv;
+ picture->extended_dmv = picture_info->extended_dmv;
+ picture->overlap = picture_info->overlap;
+ picture->vstransform = picture_info->vstransform;
+ picture->loopfilter = picture_info->loopfilter;
+ picture->fastuvmc = picture_info->fastuvmc;
+ picture->range_mapy_flag = picture_info->range_mapy_flag;
+ picture->range_mapy = picture_info->range_mapy;
+ picture->range_mapuv_flag = picture_info->range_mapuv_flag;
+ picture->range_mapuv = picture_info->range_mapuv;
+ picture->multires = picture_info->multires;
+ picture->syncmarker = picture_info->syncmarker;
+ picture->rangered = picture_info->rangered;
+ picture->maxbframes = picture_info->maxbframes;
+ picture->deblockEnable = picture_info->deblockEnable;
+ picture->pquant = picture_info->pquant;
return VDP_STATUS_OK;
}
@@ -390,6 +324,12 @@ vlVdpDecoderRender(VdpDecoder decoder,
VdpStatus ret;
struct pipe_video_decoder *dec;
unsigned i;
+ union {
+ struct pipe_picture_desc base;
+ struct pipe_mpeg12_picture_desc mpeg12;
+ struct pipe_mpeg4_picture_desc mpeg4;
+ struct pipe_vc1_picture_desc vc1;
+ } desc;
VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding\n");
@@ -412,21 +352,17 @@ vlVdpDecoderRender(VdpDecoder decoder,
// TODO: Recreate decoder with correct chroma
return VDP_STATUS_INVALID_CHROMA_TYPE;
- ++vldecoder->cur_buffer;
- vldecoder->cur_buffer %= vldecoder->num_buffers;
-
- dec->set_decode_buffer(dec, vldecoder->buffers[vldecoder->cur_buffer]);
- dec->set_decode_target(dec, vlsurf->video_buffer);
-
+ memset(&desc, 0, sizeof(desc));
+ desc.base.profile = dec->profile;
switch (u_reduce_video_profile(dec->profile)) {
case PIPE_VIDEO_CODEC_MPEG12:
- ret = vlVdpDecoderRenderMpeg12(dec, (VdpPictureInfoMPEG1Or2 *)picture_info);
+ ret = vlVdpDecoderRenderMpeg12(&desc.mpeg12, (VdpPictureInfoMPEG1Or2 *)picture_info);
break;
case PIPE_VIDEO_CODEC_MPEG4:
- ret = vlVdpDecoderRenderMpeg4(dec, (VdpPictureInfoMPEG4Part2 *)picture_info);
+ ret = vlVdpDecoderRenderMpeg4(&desc.mpeg4, (VdpPictureInfoMPEG4Part2 *)picture_info);
break;
case PIPE_VIDEO_CODEC_VC1:
- ret = vlVdpDecoderRenderVC1(dec, (VdpPictureInfoVC1 *)picture_info);
+ ret = vlVdpDecoderRenderVC1(&desc.vc1, (VdpPictureInfoVC1 *)picture_info);
break;
default:
return VDP_STATUS_INVALID_DECODER_PROFILE;
@@ -434,10 +370,19 @@ vlVdpDecoderRender(VdpDecoder decoder,
if (ret != VDP_STATUS_OK)
return ret;
- dec->begin_frame(dec);
- for (i = 0; i < bitstream_buffer_count; ++i)
- dec->decode_bitstream(dec, bitstream_buffers[i].bitstream_bytes,
- bitstream_buffers[i].bitstream);
- dec->end_frame(dec);
+ if (bitstream_buffer_count) {
+ void const *data[bitstream_buffer_count];
+ uint32_t sizes[bitstream_buffer_count];
+ uint32_t total_size = 0;
+ for (i = 0; i < bitstream_buffer_count; ++i) {
+ data[i] = bitstream_buffers[i].bitstream;
+ sizes[i] = bitstream_buffers[i].bitstream_bytes;
+ total_size += sizes[i];
+ }
+ dec->decode_bitstream(dec, vlsurf->video_buffer, &desc.base,
+ bitstream_buffer_count, total_size,
+ sizes, data);
+ } else
+ return VDP_STATUS_INVALID_VALUE;
return ret;
}
diff --git a/src/gallium/state_trackers/vdpau/vdpau_private.h b/src/gallium/state_trackers/vdpau/vdpau_private.h
index aae6f78..5f93148 100644
--- a/src/gallium/state_trackers/vdpau/vdpau_private.h
+++ b/src/gallium/state_trackers/vdpau/vdpau_private.h
@@ -327,9 +327,6 @@ typedef struct
{
vlVdpDevice *device;
struct pipe_video_decoder *decoder;
- unsigned num_buffers;
- void **buffers;
- unsigned cur_buffer;
} vlVdpDecoder;
typedef uint32_t vlHandle;
diff --git a/src/gallium/state_trackers/xorg/xvmc/surface.c b/src/gallium/state_trackers/xorg/xvmc/surface.c
index e6c5a89..f55691d 100644
--- a/src/gallium/state_trackers/xorg/xvmc/surface.c
+++ b/src/gallium/state_trackers/xorg/xvmc/surface.c
@@ -96,73 +96,6 @@ MacroBlocksToPipe(XvMCContextPrivate *context,
}
}
-static void
-SetDecoderStatus(XvMCSurfacePrivate *surface)
-{
- struct pipe_video_decoder *decoder;
- struct pipe_video_buffer *ref_frames[2];
- struct pipe_mpeg12_picture_desc desc = { { PIPE_VIDEO_PROFILE_MPEG1} };
-
- XvMCContextPrivate *context_priv;
-
- unsigned i, num_refs = 0;
-
- desc.picture_structure = surface->picture_structure;
-
- assert(surface);
-
- context_priv = surface->context->privData;
- decoder = context_priv->decoder;
-
- if (surface->decode_buffer)
- decoder->set_decode_buffer(decoder, surface->decode_buffer);
- decoder->set_decode_target(decoder, surface->video_buffer);
-
- for (i = 0; i < 2; ++i) {
- if (surface->ref[i]) {
- XvMCSurfacePrivate *ref = surface->ref[i]->privData;
-
- if (ref)
- ref_frames[num_refs++] = ref->video_buffer;
- }
- }
- decoder->set_reference_frames(decoder, ref_frames, num_refs);
- decoder->set_picture_parameters(context_priv->decoder, &desc.base);
-}
-
-static void
-RecursiveEndFrame(XvMCSurfacePrivate *surface)
-{
- XvMCContextPrivate *context_priv;
- unsigned i;
-
- assert(surface);
-
- context_priv = surface->context->privData;
-
- for ( i = 0; i < 2; ++i ) {
- if (surface->ref[i]) {
- XvMCSurface *ref = surface->ref[i];
-
- assert(ref);
-
- surface->ref[i] = NULL;
- RecursiveEndFrame(ref->privData);
- surface->ref[i] = ref;
- }
- }
-
- if (surface->picture_structure) {
- SetDecoderStatus(surface);
- surface->picture_structure = 0;
-
- for (i = 0; i < 2; ++i)
- surface->ref[i] = NULL;
-
- context_priv->decoder->end_frame(context_priv->decoder);
- }
-}
-
PUBLIC
Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surface)
{
@@ -186,8 +119,6 @@ Status XvMCCreateSurface(Display *dpy, XvMCContext *context, XvMCSurface *surfac
if (!surface_priv)
return BadAlloc;
- if (context_priv->decoder->create_buffer)
- surface_priv->decode_buffer = context_priv->decoder->create_buffer(context_priv->decoder);
surface_priv->video_buffer = pipe->create_video_buffer
(
pipe, PIPE_FORMAT_NV12, context_priv->decoder->chroma_format,
@@ -218,7 +149,7 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
)
{
struct pipe_mpeg12_macroblock mb[num_macroblocks];
- struct pipe_video_decoder *decoder;
+ struct pipe_mpeg12_picture_desc desc = { { PIPE_VIDEO_PROFILE_MPEG1} };
XvMCContextPrivate *context_priv;
XvMCSurfacePrivate *target_surface_priv;
@@ -257,7 +188,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
assert(flags == 0 || flags == XVMC_SECOND_FIELD);
context_priv = context->privData;
- decoder = context_priv->decoder;
target_surface_priv = target_surface->privData;
past_surface_priv = past_surface ? past_surface->privData : NULL;
@@ -268,12 +198,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
assert(!future_surface || future_surface_priv->context == context);
// call end frame on all referenced frames
- if (past_surface)
- RecursiveEndFrame(past_surface->privData);
-
- if (future_surface)
- RecursiveEndFrame(future_surface->privData);
-
xvmc_mb = macroblocks->macro_blocks + first_macroblock;
/* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
@@ -284,24 +208,24 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
(xvmc_mb->x == 0 && xvmc_mb->y == 0))) {
// If they change anyway we must assume that the current frame is ended
- RecursiveEndFrame(target_surface_priv);
+ context_priv->decoder->flush(context_priv->decoder);
}
target_surface_priv->ref[0] = past_surface;
target_surface_priv->ref[1] = future_surface;
- if (target_surface_priv->picture_structure)
- SetDecoderStatus(target_surface_priv);
- else {
- target_surface_priv->picture_structure = picture_structure;
- SetDecoderStatus(target_surface_priv);
- decoder->begin_frame(decoder);
- }
+ target_surface_priv->picture_structure = picture_structure;
+
+ desc.picture_structure = picture_structure;
+ if (past_surface)
+ desc.ref_forward = ((XvMCSurfacePrivate*)past_surface->privData)->video_buffer;
+ if (future_surface)
+ desc.ref_backward = ((XvMCSurfacePrivate*)future_surface->privData)->video_buffer;
MacroBlocksToPipe(context_priv, target_surface_priv, picture_structure,
xvmc_mb, blocks, mb, num_macroblocks);
- context_priv->decoder->decode_macroblock(context_priv->decoder, &mb[0].base, num_macroblocks);
+ context_priv->decoder->decode_macroblock(context_priv->decoder, target_surface_priv->video_buffer, &desc.base, &mb[0].base, num_macroblocks);
XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
@@ -365,6 +289,7 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
surface_priv = surface->privData;
context = surface_priv->context;
context_priv = context->privData;
+ context_priv->decoder->flush(context_priv->decoder);
assert(flags == XVMC_TOP_FIELD || flags == XVMC_BOTTOM_FIELD || flags == XVMC_FRAME_PICTURE);
assert(srcx + srcw - 1 < surface->width);
@@ -399,10 +324,6 @@ Status XvMCPutSurface(Display *dpy, XvMCSurface *surface, Drawable drawable,
assert(desty + desth - 1 < drawable_surface->height);
*/
- RecursiveEndFrame(surface_priv);
-
- context_priv->decoder->flush(context_priv->decoder);
-
vl_compositor_clear_layers(compositor);
vl_compositor_set_buffer_layer(compositor, 0, surface_priv->video_buffer, &src_rect, NULL);
@@ -499,13 +420,7 @@ Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
surface_priv = surface->privData;
context_priv = surface_priv->context->privData;
-
- if (surface_priv->picture_structure) {
- SetDecoderStatus(surface_priv);
- context_priv->decoder->end_frame(context_priv->decoder);
- }
- if (surface_priv->decode_buffer)
- context_priv->decoder->destroy_buffer(context_priv->decoder, surface_priv->decode_buffer);
+ context_priv->decoder->flush(context_priv->decoder);
surface_priv->video_buffer->destroy(surface_priv->video_buffer);
FREE(surface_priv);
surface->privData = NULL;
diff --git a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
index 305e51f..642baa8 100644
--- a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
+++ b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
@@ -69,7 +69,6 @@ typedef struct
typedef struct
{
- void *decode_buffer;
struct pipe_video_buffer *video_buffer;
/* nonzero if this picture is already being decoded */
--
1.7.7.3