[PATCH 3/6] g3dvl: Rework the decoder interface part 3/6

Christian König deathsimple at vodafone.de
Wed Aug 10 09:07:01 PDT 2011


Revert to a macroblock-based interface. The structure used tries to
stay as close to the MPEG-2 spec as possible.
---
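Illustrative note, not part of the diff below: with this rework a state
tracker no longer pulls the ycbcr/mv streams out of the decoder via the
get_*_stream()/get_*_buffer() callbacks; instead it fills
pipe_mpeg12_macroblock structures and hands them to decode_macroblock()
between begin_frame() and end_frame(). A minimal sketch of the caller side,
assuming the picture description and reference frames have already been set
up; the names render_macroblocks, target, mbs and num_mbs are made up for
the example:

static void
render_macroblocks(struct pipe_video_decoder *dec,
                   struct pipe_video_buffer *target,
                   struct pipe_mpeg12_macroblock *mbs,
                   unsigned num_mbs)
{
   unsigned i;

   dec->set_decode_target(dec, target);
   dec->begin_frame(dec);

   for (i = 0; i < num_mbs; ++i) {
      /* every macroblock carries the codec it belongs to */
      mbs[i].base.codec = PIPE_VIDEO_CODEC_MPEG12;
      dec->decode_macroblock(dec, &mbs[i].base);
   }

   dec->end_frame(dec);
}

The per-plane num_ycbcr_blocks bookkeeping that the state trackers used to
pass around is now kept entirely inside the decoder buffer.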
 src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c     |   34 ++--
 src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h     |    9 +-
 src/gallium/auxiliary/vl/vl_mpeg12_decoder.c       |  238 ++++++++++++++----
 src/gallium/auxiliary/vl/vl_mpeg12_decoder.h       |    6 +
 src/gallium/auxiliary/vl/vl_vertex_buffers.c       |   12 +-
 src/gallium/auxiliary/vl/vl_vertex_buffers.h       |   51 ++++-
 src/gallium/include/pipe/p_video_decoder.h         |   25 +--
 src/gallium/include/pipe/p_video_state.h           |  108 ++++++---
 src/gallium/state_trackers/vdpau/decode.c          |    5 +-
 src/gallium/state_trackers/xorg/xvmc/context.c     |    1 +
 src/gallium/state_trackers/xorg/xvmc/surface.c     |  259 ++++----------------
 .../state_trackers/xorg/xvmc/xvmc_private.h        |   13 +-
 12 files changed, 396 insertions(+), 365 deletions(-)

diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c
index 9dd032e..61ffcd1 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.c
@@ -1197,7 +1197,7 @@ slice_intra_DCT(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc * pictur
 
    bs->ycbcr_stream[cc]->x = x;
    bs->ycbcr_stream[cc]->y = y;
-   bs->ycbcr_stream[cc]->intra = PIPE_MPEG12_DCT_INTRA;
+   bs->ycbcr_stream[cc]->intra = 1;
    bs->ycbcr_stream[cc]->coding = coding;
 
    vl_vlc_needbits(&bs->vlc);
@@ -1233,7 +1233,7 @@ slice_non_intra_DCT(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc * pi
 
    bs->ycbcr_stream[cc]->x = x;
    bs->ycbcr_stream[cc]->y = y;
-   bs->ycbcr_stream[cc]->intra = PIPE_MPEG12_DCT_DELTA;
+   bs->ycbcr_stream[cc]->intra = 0;
    bs->ycbcr_stream[cc]->coding = coding;
 
    memset(dest, 0, sizeof(int16_t) * 64);
@@ -1250,7 +1250,7 @@ slice_non_intra_DCT(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc * pi
 }
 
 static INLINE void
-motion_mp1(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_mp1(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int motion_x, motion_y;
 
@@ -1268,7 +1268,7 @@ motion_mp1(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector
 }
 
 static INLINE void
-motion_fr_frame(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fr_frame(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int motion_x, motion_y;
 
@@ -1286,7 +1286,7 @@ motion_fr_frame(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionve
 }
 
 static INLINE void
-motion_fr_field(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fr_field(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int motion_x, motion_y;
 
@@ -1320,7 +1320,7 @@ motion_fr_field(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionve
 }
 
 static INLINE void
-motion_fr_dmv(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fr_dmv(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int motion_x, motion_y;
 
@@ -1340,7 +1340,7 @@ motion_fr_dmv(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvect
 
 /* like motion_frame, but parsing without actual motion compensation */
 static INLINE void
-motion_fr_conceal(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fr_conceal(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int tmp;
 
@@ -1360,7 +1360,7 @@ motion_fr_conceal(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motion
 }
 
 static INLINE void
-motion_fi_field(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fi_field(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int motion_x, motion_y;
 
@@ -1384,7 +1384,7 @@ motion_fi_field(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionve
 }
 
 static INLINE void
-motion_fi_16x8(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fi_16x8(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int motion_x, motion_y;
 
@@ -1425,7 +1425,7 @@ motion_fi_16x8(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvec
 }
 
 static INLINE void
-motion_fi_dmv(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fi_dmv(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int motion_x, motion_y;
 
@@ -1445,7 +1445,7 @@ motion_fi_dmv(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvect
 
 
 static INLINE void
-motion_fi_conceal(struct vl_mpg12_bs *bs, unsigned f_code[2], struct pipe_motionvector *mv)
+motion_fi_conceal(struct vl_mpg12_bs *bs, unsigned f_code[2], struct vl_motionvector *mv)
 {
    int tmp;
 
@@ -1474,8 +1474,8 @@ do {							\
 
 static INLINE void
 store_motionvectors(struct vl_mpg12_bs *bs, unsigned *mv_pos,
-                    struct pipe_motionvector *mv_fwd,
-                    struct pipe_motionvector *mv_bwd)
+                    struct vl_motionvector *mv_fwd,
+                    struct vl_motionvector *mv_bwd)
 {
    bs->mv_stream[0][*mv_pos].top = mv_fwd->top;
    bs->mv_stream[0][*mv_pos].bottom =
@@ -1554,8 +1554,8 @@ slice_init(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc * picture,
 static INLINE bool
 decode_slice(struct vl_mpg12_bs *bs, struct pipe_mpeg12_picture_desc *picture)
 {
-   enum pipe_video_field_select default_field_select;
-   struct pipe_motionvector mv_fwd, mv_bwd;
+   enum vl_field_select default_field_select;
+   struct vl_motionvector mv_fwd, mv_bwd;
    enum pipe_mpeg12_dct_type dct_type;
 
    /* predictor for DC coefficients in intra blocks */
@@ -1787,8 +1787,8 @@ vl_mpg12_bs_init(struct vl_mpg12_bs *bs, unsigned width, unsigned height)
 }
 
 void
-vl_mpg12_bs_set_buffers(struct vl_mpg12_bs *bs, struct pipe_ycbcr_block *ycbcr_stream[VL_MAX_PLANES],
-                        short *ycbcr_buffer[VL_MAX_PLANES], struct pipe_motionvector *mv_stream[VL_MAX_REF_FRAMES])
+vl_mpg12_bs_set_buffers(struct vl_mpg12_bs *bs, struct vl_ycbcr_block *ycbcr_stream[VL_MAX_PLANES],
+                        short *ycbcr_buffer[VL_MAX_PLANES], struct vl_motionvector *mv_stream[VL_MAX_REF_FRAMES])
 {
    unsigned i;
 
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h
index 4e48a9f..8a35dc4 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_bitstream.h
@@ -30,6 +30,7 @@
 
 #include "vl_defines.h"
 #include "vl_vlc.h"
+#include "vl_vertex_buffers.h"
 
 struct vl_mpg12_bs
 {
@@ -39,18 +40,18 @@ struct vl_mpg12_bs
 
    unsigned *num_ycbcr_blocks;
 
-   struct pipe_ycbcr_block *ycbcr_stream[VL_MAX_PLANES];
+   struct vl_ycbcr_block *ycbcr_stream[VL_MAX_PLANES];
    short *ycbcr_buffer[VL_MAX_PLANES];
 
-   struct pipe_motionvector *mv_stream[VL_MAX_REF_FRAMES];
+   struct vl_motionvector *mv_stream[VL_MAX_REF_FRAMES];
 };
 
 void
 vl_mpg12_bs_init(struct vl_mpg12_bs *bs, unsigned width, unsigned height);
 
 void
-vl_mpg12_bs_set_buffers(struct vl_mpg12_bs *bs, struct pipe_ycbcr_block *ycbcr_stream[VL_MAX_PLANES],
-                        short *ycbcr_buffer[VL_MAX_PLANES], struct pipe_motionvector *mv_stream[VL_MAX_REF_FRAMES]);
+vl_mpg12_bs_set_buffers(struct vl_mpg12_bs *bs, struct vl_ycbcr_block *ycbcr_stream[VL_MAX_PLANES],
+                        short *ycbcr_buffer[VL_MAX_PLANES], struct vl_motionvector *mv_stream[VL_MAX_REF_FRAMES]);
 
 void
 vl_mpg12_bs_decode(struct vl_mpg12_bs *bs, unsigned num_bytes, const void *buffer,
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
index 228a386..97eee95 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.c
@@ -75,6 +75,12 @@ static const struct format_config mc_format_config[] = {
 static const unsigned num_mc_format_configs =
    sizeof(mc_format_config) / sizeof(struct format_config);
 
+static const unsigned const_empty_block_mask_420[3][2][2] = {
+   { { 0x20, 0x10 },  { 0x08, 0x04 } },
+   { { 0x02, 0x02 },  { 0x02, 0x02 } },
+   { { 0x01, 0x01 },  { 0x01, 0x01 } }
+};
+
 static bool
 init_zscan_buffer(struct vl_mpeg12_decoder *dec, struct vl_mpeg12_buffer *buffer)
 {
@@ -224,6 +230,148 @@ cleanup_mc_buffer(struct vl_mpeg12_buffer *buf)
       vl_mc_cleanup_buffer(&buf->mc[i]);
 }
 
+static inline void
+MacroBlockTypeToPipeWeights(const struct pipe_mpeg12_macroblock *mb, unsigned weights[2])
+{
+   assert(mb);
+
+   switch (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD)) {
+   case PIPE_MPEG12_MB_TYPE_MOTION_FORWARD:
+      weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
+      weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
+      break;
+
+   case (PIPE_MPEG12_MB_TYPE_MOTION_FORWARD | PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD):
+      weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
+      weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
+      break;
+
+   case PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD:
+      weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
+      weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
+      break;
+
+   default:
+      /* workaround for xine's xxmc video out plugin */
+      if (!(mb->macroblock_type & ~PIPE_MPEG12_MB_TYPE_MOTION_PATTERN)) {
+         weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
+         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
+      } else {
+         weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
+         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
+      }
+      break;
+   }
+}
+
+static inline struct vl_motionvector
+MotionVectorToPipe(const struct pipe_mpeg12_macroblock *mb, unsigned vector,
+                   unsigned field_select_mask, unsigned weight)
+{
+   struct vl_motionvector mv;
+
+   assert(mb);
+
+   switch (mb->macroblock_modes.bits.frame_motion_type) {
+   case PIPE_MPEG12_MO_TYPE_FRAME:
+      mv.top.x = mb->PMV[0][vector][0];
+      mv.top.y = mb->PMV[0][vector][1];
+      mv.top.field_select = PIPE_VIDEO_FRAME;
+      mv.top.weight = weight;
+
+      mv.bottom.x = mb->PMV[0][vector][0];
+      mv.bottom.y = mb->PMV[0][vector][1];
+      mv.bottom.weight = weight;
+      mv.bottom.field_select = PIPE_VIDEO_FRAME;
+      break;
+
+   case PIPE_MPEG12_MO_TYPE_FIELD:
+      mv.top.x = mb->PMV[0][vector][0];
+      mv.top.y = mb->PMV[0][vector][1];
+      mv.top.field_select = (mb->motion_vertical_field_select & field_select_mask) ?
+         PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
+      mv.top.weight = weight;
+
+      mv.bottom.x = mb->PMV[1][vector][0];
+      mv.bottom.y = mb->PMV[1][vector][1];
+      mv.bottom.field_select = (mb->motion_vertical_field_select & (field_select_mask << 2)) ?
+         PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
+      mv.bottom.weight = weight;
+      break;
+
+   default: // TODO: Support DUALPRIME and 16x8
+      break;
+   }
+
+   return mv;
+}
+
+static inline void
+UploadYcbcrBlocks(struct vl_mpeg12_decoder *dec,
+                  struct vl_mpeg12_buffer *buf,
+                  const struct pipe_mpeg12_macroblock *mb)
+{
+   unsigned intra, mb_x, mb_y;
+   unsigned tb, x, y, luma_blocks;
+   short *blocks;
+
+   assert(dec && buf);
+   assert(mb);
+
+   if (!mb->coded_block_pattern)
+      return;
+
+   blocks = mb->blocks;
+   intra = mb->macroblock_type & PIPE_MPEG12_MB_TYPE_MOTION_INTRA ? 1 : 0;
+   mb_x = mb->macroblock_address % dec->width_in_macroblocks;
+   mb_y = mb->macroblock_address / dec->width_in_macroblocks;
+
+   for (y = 0, luma_blocks = 0; y < 2; ++y) {
+      for (x = 0; x < 2; ++x) {
+         if (mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {
+
+            struct vl_ycbcr_block *stream = buf->ycbcr_stream[0];
+            stream->x = mb_x * 2 + x;
+            stream->y = mb_y * 2 + y;
+            stream->intra = intra;
+            stream->coding = mb->macroblock_modes.bits.dct_type;
+
+            buf->num_ycbcr_blocks[0]++;
+            buf->ycbcr_stream[0]++;
+
+            luma_blocks++;
+         }
+      }
+   }
+
+   if (luma_blocks > 0) {
+      memcpy(buf->texels[0], blocks, 64 * sizeof(short) * luma_blocks);
+      buf->texels[0] += 64 * luma_blocks;
+      blocks += 64 * luma_blocks;
+   }
+
+   /* TODO: Implement 422, 444 */
+   //assert(dec->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
+
+   for (tb = 1; tb < 3; ++tb) {
+      if (mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {
+
+         struct vl_ycbcr_block *stream = buf->ycbcr_stream[tb];
+         stream->x = mb_x;
+         stream->y = mb_y;
+         stream->intra = intra;
+         stream->coding = 0;
+
+         buf->num_ycbcr_blocks[tb]++;
+         buf->ycbcr_stream[tb]++;
+
+         memcpy(buf->texels[tb], blocks, 64 * sizeof(short));
+         buf->texels[tb] += 64;
+         blocks += 64;
+      }
+   }
+}
+
 static void
 vl_mpeg12_destroy(struct pipe_video_decoder *decoder)
 {
@@ -450,19 +598,19 @@ vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
       );
 
       buf->texels[i] = dec->base.context->transfer_map(dec->base.context, buf->tex_transfer[i]);
+
+      buf->num_ycbcr_blocks[i] = 0;
    }
 
-   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
-      struct pipe_ycbcr_block *ycbcr_stream[VL_MAX_PLANES];
-      struct pipe_motionvector *mv_stream[VL_MAX_REF_FRAMES];
+   for (i = 0; i < VL_MAX_PLANES; ++i)
+      buf->ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
 
-      for (i = 0; i < VL_MAX_PLANES; ++i)
-         ycbcr_stream[i] = vl_vb_get_ycbcr_stream(&buf->vertex_stream, i);
+   for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
+      buf->mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
 
-      for (i = 0; i < VL_MAX_REF_FRAMES; ++i)
-         mv_stream[i] = vl_vb_get_mv_stream(&buf->vertex_stream, i);
+   if (dec->base.entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
+      vl_mpg12_bs_set_buffers(&buf->bs, buf->ycbcr_stream, buf->texels, buf->mv_stream);
 
-      vl_mpg12_bs_set_buffers(&buf->bs, ycbcr_stream, buf->texels, mv_stream);
    } else {
 
       for (i = 0; i < VL_MAX_PLANES; ++i)
@@ -470,52 +618,42 @@ vl_mpeg12_begin_frame(struct pipe_video_decoder *decoder)
    }
 }
 
-static struct pipe_ycbcr_block *
-vl_mpeg12_get_ycbcr_stream(struct pipe_video_decoder *decoder, int component)
+static void
+vl_mpeg12_decode_macroblock(struct pipe_video_decoder *decoder,
+                            const struct pipe_macroblock *macroblock)
 {
    struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+   const struct pipe_mpeg12_macroblock *mb = (const struct pipe_mpeg12_macroblock *)macroblock;
+   struct vl_mpeg12_buffer *buf;
 
-   assert(dec && dec->current_buffer);
-   assert(component < VL_MAX_PLANES);
-
-   return vl_vb_get_ycbcr_stream(&dec->current_buffer->vertex_stream, component);
-}
-
-static short *
-vl_mpeg12_get_ycbcr_buffer(struct pipe_video_decoder *decoder, int component)
-{
-   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+   unsigned i, mv_weights[2];
 
    assert(dec && dec->current_buffer);
-   assert(component < VL_MAX_PLANES);
-
-   return dec->current_buffer->texels[component];
-}
+   assert(macroblock && macroblock->codec == PIPE_VIDEO_CODEC_MPEG12);
 
-static unsigned
-vl_mpeg12_get_mv_stream_stride(struct pipe_video_decoder *decoder)
-{
-   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
-
-   assert(dec && dec->current_buffer);
+   buf = dec->current_buffer;
+   assert(buf);
 
-   return vl_vb_get_mv_stream_stride(&dec->current_buffer->vertex_stream);
-}
+   if (mb->macroblock_type & (PIPE_MPEG12_MB_TYPE_MOTION_PATTERN | PIPE_MPEG12_MB_TYPE_MOTION_INTRA))
+      UploadYcbcrBlocks(dec, buf, mb);
 
-static struct pipe_motionvector *
-vl_mpeg12_get_mv_stream(struct pipe_video_decoder *decoder, int ref_frame)
-{
-   struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
+   MacroBlockTypeToPipeWeights(mb, mv_weights);
 
-   assert(dec && dec->current_buffer);
+   for (i = 0; i < 2; ++i) {
+      if (!dec->ref_frames[i][0]) continue;
 
-   return vl_vb_get_mv_stream(&dec->current_buffer->vertex_stream, ref_frame);
+      buf->mv_stream[i][mb->macroblock_address] = MotionVectorToPipe
+      (
+         mb, i,
+         i ? PIPE_MPEG12_FS_FIRST_BACKWARD : PIPE_MPEG12_FS_FIRST_FORWARD,
+         mv_weights[i]
+      );
+   }
 }
 
 static void
 vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
-                           unsigned num_bytes, const void *data,
-                           unsigned num_ycbcr_blocks[3])
+                           unsigned num_bytes, const void *data)
 {
    struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
    struct vl_mpeg12_buffer *buf;
@@ -531,11 +669,11 @@ vl_mpeg12_decode_bitstream(struct pipe_video_decoder *decoder,
       vl_zscan_set_layout(&buf->zscan[i], dec->picture_desc.alternate_scan ?
                           dec->zscan_alternate : dec->zscan_normal);
 
-   vl_mpg12_bs_decode(&buf->bs, num_bytes, data, &dec->picture_desc, num_ycbcr_blocks);
+   vl_mpg12_bs_decode(&buf->bs, num_bytes, data, &dec->picture_desc, buf->num_ycbcr_blocks);
 }
 
 static void
-vl_mpeg12_end_frame(struct pipe_video_decoder *decoder, unsigned num_ycbcr_blocks[3])
+vl_mpeg12_end_frame(struct pipe_video_decoder *decoder)
 {
    struct vl_mpeg12_decoder *dec = (struct vl_mpeg12_decoder *)decoder;
    struct pipe_sampler_view **mc_source_sv;
@@ -579,15 +717,15 @@ vl_mpeg12_end_frame(struct pipe_video_decoder *decoder, unsigned num_ycbcr_block
 
    dec->base.context->bind_vertex_elements_state(dec->base.context, dec->ves_ycbcr);
    for (i = 0; i < VL_MAX_PLANES; ++i) {
-      if (!num_ycbcr_blocks[i]) continue;
+      if (!buf->num_ycbcr_blocks[i]) continue;
 
       vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
       dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
 
-      vl_zscan_render(&buf->zscan[i] , num_ycbcr_blocks[i]);
+      vl_zscan_render(&buf->zscan[i] , buf->num_ycbcr_blocks[i]);
 
       if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
-         vl_idct_flush(&buf->idct[i], num_ycbcr_blocks[i]);
+         vl_idct_flush(&buf->idct[i], buf->num_ycbcr_blocks[i]);
    }
 
    mc_source_sv = dec->mc_source->get_sampler_view_planes(dec->mc_source);
@@ -596,7 +734,7 @@ vl_mpeg12_end_frame(struct pipe_video_decoder *decoder, unsigned num_ycbcr_block
 
       nr_components = util_format_get_nr_components(dec->target_surfaces[i]->texture->format);
       for (j = 0; j < nr_components; ++j, ++component) {
-         if (!num_ycbcr_blocks[i]) continue;
+         if (!buf->num_ycbcr_blocks[i]) continue;
 
          vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, component);
          dec->base.context->set_vertex_buffers(dec->base.context, 3, vb);
@@ -607,7 +745,7 @@ vl_mpeg12_end_frame(struct pipe_video_decoder *decoder, unsigned num_ycbcr_block
             dec->base.context->set_fragment_sampler_views(dec->base.context, 1, &mc_source_sv[component]);
             dec->base.context->bind_fragment_sampler_states(dec->base.context, 1, &dec->sampler_ycbcr);
          }
-         vl_mc_render_ycbcr(&buf->mc[i], j, num_ycbcr_blocks[component]);
+         vl_mc_render_ycbcr(&buf->mc[i], j, buf->num_ycbcr_blocks[component]);
       }
    }
 }
@@ -893,16 +1031,14 @@ vl_create_mpeg12_decoder(struct pipe_context *context,
    dec->base.set_decode_target = vl_mpeg12_set_decode_target;
    dec->base.set_reference_frames = vl_mpeg12_set_reference_frames;
    dec->base.begin_frame = vl_mpeg12_begin_frame;
-   dec->base.get_ycbcr_stream = vl_mpeg12_get_ycbcr_stream;
-   dec->base.get_ycbcr_buffer = vl_mpeg12_get_ycbcr_buffer;
-   dec->base.get_mv_stream_stride = vl_mpeg12_get_mv_stream_stride;
-   dec->base.get_mv_stream = vl_mpeg12_get_mv_stream;
+   dec->base.decode_macroblock = vl_mpeg12_decode_macroblock;
    dec->base.decode_bitstream = vl_mpeg12_decode_bitstream;
    dec->base.end_frame = vl_mpeg12_end_frame;
    dec->base.flush = vl_mpeg12_flush;
 
    dec->blocks_per_line = MAX2(util_next_power_of_two(dec->base.width) / block_size_pixels, 4);
    dec->num_blocks = (dec->base.width * dec->base.height) / block_size_pixels;
+   dec->width_in_macroblocks = align(dec->base.width, MACROBLOCK_WIDTH) / MACROBLOCK_WIDTH;
 
    dec->quads = vl_vb_upload_quads(dec->base.context);
    dec->pos = vl_vb_upload_pos(
diff --git a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
index 85c84fc..277f5b9 100644
--- a/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
+++ b/src/gallium/auxiliary/vl/vl_mpeg12_decoder.h
@@ -49,6 +49,7 @@ struct vl_mpeg12_decoder
 
    unsigned blocks_per_line;
    unsigned num_blocks;
+   unsigned width_in_macroblocks;
 
    enum pipe_format zscan_source_format;
 
@@ -86,6 +87,8 @@ struct vl_mpeg12_buffer
 {
    struct vl_vertex_buffer vertex_stream;
 
+   unsigned num_ycbcr_blocks[3];
+
    struct pipe_video_buffer *zscan_source;
 
    struct vl_mpg12_bs bs;
@@ -95,6 +98,9 @@ struct vl_mpeg12_buffer
 
    struct pipe_transfer *tex_transfer[VL_MAX_PLANES];
    short *texels[VL_MAX_PLANES];
+
+   struct vl_ycbcr_block *ycbcr_stream[VL_MAX_PLANES];
+   struct vl_motionvector *mv_stream[VL_MAX_REF_FRAMES];
 };
 
 /**
diff --git a/src/gallium/auxiliary/vl/vl_vertex_buffers.c b/src/gallium/auxiliary/vl/vl_vertex_buffers.c
index c0f1449..b7aa14b 100644
--- a/src/gallium/auxiliary/vl/vl_vertex_buffers.c
+++ b/src/gallium/auxiliary/vl/vl_vertex_buffers.c
@@ -266,7 +266,7 @@ vl_vb_init(struct vl_vertex_buffer *buffer, struct pipe_context *pipe,
          pipe->screen,
          PIPE_BIND_VERTEX_BUFFER,
          PIPE_USAGE_STREAM,
-         sizeof(struct pipe_ycbcr_block) * size * 4
+         sizeof(struct vl_ycbcr_block) * size * 4
       );
       if (!buffer->ycbcr[i].resource)
          goto error_ycbcr;
@@ -278,7 +278,7 @@ vl_vb_init(struct vl_vertex_buffer *buffer, struct pipe_context *pipe,
          pipe->screen,
          PIPE_BIND_VERTEX_BUFFER,
          PIPE_USAGE_STREAM,
-         sizeof(struct pipe_motionvector) * size
+         sizeof(struct vl_motionvector) * size
       );
       if (!buffer->mv[i].resource)
          goto error_mv;
@@ -310,7 +310,7 @@ vl_vb_get_ycbcr(struct vl_vertex_buffer *buffer, int component)
 
    assert(buffer);
 
-   buf.stride = sizeof(struct pipe_ycbcr_block);
+   buf.stride = sizeof(struct vl_ycbcr_block);
    buf.buffer_offset = 0;
    buf.buffer = buffer->ycbcr[component].resource;
 
@@ -324,7 +324,7 @@ vl_vb_get_mv(struct vl_vertex_buffer *buffer, int motionvector)
 
    assert(buffer);
 
-   buf.stride = sizeof(struct pipe_motionvector);
+   buf.stride = sizeof(struct vl_motionvector);
    buf.buffer_offset = 0;
    buf.buffer = buffer->mv[motionvector].resource;
 
@@ -360,7 +360,7 @@ vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
 
 }
 
-struct pipe_ycbcr_block *
+struct vl_ycbcr_block *
 vl_vb_get_ycbcr_stream(struct vl_vertex_buffer *buffer, int component)
 {
    assert(buffer);
@@ -377,7 +377,7 @@ vl_vb_get_mv_stream_stride(struct vl_vertex_buffer *buffer)
    return buffer->width;
 }
 
-struct pipe_motionvector *
+struct vl_motionvector *
 vl_vb_get_mv_stream(struct vl_vertex_buffer *buffer, int ref_frame)
 {
    assert(buffer);
diff --git a/src/gallium/auxiliary/vl/vl_vertex_buffers.h b/src/gallium/auxiliary/vl/vl_vertex_buffers.h
index 74845a4..38db899 100644
--- a/src/gallium/auxiliary/vl/vl_vertex_buffers.h
+++ b/src/gallium/auxiliary/vl/vl_vertex_buffers.h
@@ -52,20 +52,55 @@ enum VS_INPUT
    NUM_VS_INPUTS = 4
 };
 
+enum vl_mv_weight
+{
+   PIPE_VIDEO_MV_WEIGHT_MIN = 0,
+   PIPE_VIDEO_MV_WEIGHT_HALF = 128,
+   PIPE_VIDEO_MV_WEIGHT_MAX = 256
+};
+
+enum vl_field_select
+{
+   PIPE_VIDEO_FRAME = 0,
+   PIPE_VIDEO_TOP_FIELD = 1,
+   PIPE_VIDEO_BOTTOM_FIELD = 3,
+
+   /* TODO
+   PIPE_VIDEO_DUALPRIME
+   PIPE_VIDEO_16x8
+   */
+};
+
+struct vl_motionvector
+{
+   struct {
+      int16_t x, y;
+      int16_t field_select; /**< enum vl_field_select */
+      int16_t weight;  /**< enum vl_mv_weight */
+   } top, bottom;
+};
+
+struct vl_ycbcr_block
+{
+   uint8_t x, y;
+   uint8_t intra;
+   uint8_t coding;
+};
+
 struct vl_vertex_buffer
 {
    unsigned width, height;
 
    struct {
-      struct pipe_resource    *resource;
-      struct pipe_transfer    *transfer;
-      struct pipe_ycbcr_block *vertex_stream;
+      struct pipe_resource  *resource;
+      struct pipe_transfer  *transfer;
+      struct vl_ycbcr_block *vertex_stream;
    } ycbcr[VL_MAX_PLANES];
 
    struct {
-      struct pipe_resource     *resource;
-      struct pipe_transfer     *transfer;
-      struct pipe_motionvector *vertex_stream;
+      struct pipe_resource   *resource;
+      struct pipe_transfer   *transfer;
+      struct vl_motionvector *vertex_stream;
    } mv[VL_MAX_REF_FRAMES];
 };
 
@@ -89,13 +124,13 @@ void vl_vb_map(struct vl_vertex_buffer *buffer, struct pipe_context *pipe);
 
 struct pipe_vertex_buffer vl_vb_get_ycbcr(struct vl_vertex_buffer *buffer, int component);
 
-struct pipe_ycbcr_block *vl_vb_get_ycbcr_stream(struct vl_vertex_buffer *buffer, int component);
+struct vl_ycbcr_block *vl_vb_get_ycbcr_stream(struct vl_vertex_buffer *buffer, int component);
 
 struct pipe_vertex_buffer vl_vb_get_mv(struct vl_vertex_buffer *buffer, int ref_frame);
 
 unsigned vl_vb_get_mv_stream_stride(struct vl_vertex_buffer *buffer);
 
-struct pipe_motionvector *vl_vb_get_mv_stream(struct vl_vertex_buffer *buffer, int ref_frame);
+struct vl_motionvector *vl_vb_get_mv_stream(struct vl_vertex_buffer *buffer, int ref_frame);
 
 void vl_vb_unmap(struct vl_vertex_buffer *buffer, struct pipe_context *pipe);
 
diff --git a/src/gallium/include/pipe/p_video_decoder.h b/src/gallium/include/pipe/p_video_decoder.h
index ae07113..0ffb32c 100644
--- a/src/gallium/include/pipe/p_video_decoder.h
+++ b/src/gallium/include/pipe/p_video_decoder.h
@@ -106,36 +106,21 @@ struct pipe_video_decoder
    void (*begin_frame)(struct pipe_video_decoder *decoder);
 
    /**
-    * get the pointer where to put the ycbcr blocks of a component
+    * decode a macroblock
     */
-   struct pipe_ycbcr_block *(*get_ycbcr_stream)(struct pipe_video_decoder *decoder, int component);
-
-   /**
-    * get the pointer where to put the ycbcr dct block data of a component
-    */
-   short *(*get_ycbcr_buffer)(struct pipe_video_decoder *decoder, int component);
-
-   /**
-    * get the stride of the mv buffer
-    */
-   unsigned (*get_mv_stream_stride)(struct pipe_video_decoder *decoder);
-
-   /**
-    * get the pointer where to put the motion vectors of a ref frame
-    */
-   struct pipe_motionvector *(*get_mv_stream)(struct pipe_video_decoder *decoder, int ref_frame);
+   void (*decode_macroblock)(struct pipe_video_decoder *decoder,
+                             const struct pipe_macroblock *macroblock);
 
    /**
     * decode a bitstream
     */
    void (*decode_bitstream)(struct pipe_video_decoder *decoder,
-                            unsigned num_bytes, const void *data,
-                            unsigned num_ycbcr_blocks[3]);
+                            unsigned num_bytes, const void *data);
 
    /**
     * end decoding of the current frame
     */
-   void (*end_frame)(struct pipe_video_decoder *decoder, unsigned num_ycbcr_blocks[3]);
+   void (*end_frame)(struct pipe_video_decoder *decoder);
 
    /**
     * flush any outstanding command buffers to the hardware
diff --git a/src/gallium/include/pipe/p_video_state.h b/src/gallium/include/pipe/p_video_state.h
index 2a64ffb..dfd102b 100644
--- a/src/gallium/include/pipe/p_video_state.h
+++ b/src/gallium/include/pipe/p_video_state.h
@@ -50,58 +50,54 @@ enum pipe_mpeg12_picture_type
    PIPE_MPEG12_PICTURE_TYPE_FRAME
 };
 
-enum pipe_mpeg12_dct_intra
+/*
+ * flags for macroblock_type, see section 6.3.17.1 in the spec
+ */
+enum pipe_mpeg12_macroblock_type
 {
-   PIPE_MPEG12_DCT_DELTA = 0,
-   PIPE_MPEG12_DCT_INTRA = 1
+   PIPE_MPEG12_MB_TYPE_MOTION_FORWARD = 0x02,
+   PIPE_MPEG12_MB_TYPE_MOTION_BACKWARD = 0x04,
+   PIPE_MPEG12_MB_TYPE_MOTION_PATTERN = 0x08,
+   PIPE_MPEG12_MB_TYPE_MOTION_INTRA = 0x10
 };
 
-enum pipe_mpeg12_dct_type
+/*
+ * flags for motion_type, see table 6-17 and 6-18 in the spec
+ */
+enum pipe_mpeg12_motion_type
 {
-   PIPE_MPEG12_DCT_TYPE_FRAME = 0,
-   PIPE_MPEG12_DCT_TYPE_FIELD = 1
+   PIPE_MPEG12_MO_TYPE_RESERVED = 0x00,
+   PIPE_MPEG12_MO_TYPE_FIELD = 0x01,
+   PIPE_MPEG12_MO_TYPE_FRAME = 0x02,
+   PIPE_MPEG12_MO_TYPE_16x8 = 0x02,
+   PIPE_MPEG12_MO_TYPE_DUAL_PRIME = 0x03
 };
 
-enum pipe_video_field_select
-{
-   PIPE_VIDEO_FRAME = 0,
-   PIPE_VIDEO_TOP_FIELD = 1,
-   PIPE_VIDEO_BOTTOM_FIELD = 3,
-
-   /* TODO
-   PIPE_VIDEO_DUALPRIME
-   PIPE_VIDEO_16x8
-   */
-};
-
-enum pipe_video_mv_weight
+/*
+ * see section 6.3.17.1 and table 6-19 in the spec
+ */
+enum pipe_mpeg12_dct_type
 {
-   PIPE_VIDEO_MV_WEIGHT_MIN = 0,
-   PIPE_VIDEO_MV_WEIGHT_HALF = 128,
-   PIPE_VIDEO_MV_WEIGHT_MAX = 256
+   PIPE_MPEG12_DCT_TYPE_FRAME = 0,
+   PIPE_MPEG12_DCT_TYPE_FIELD = 1
 };
 
-/* bitfields because this is used as a vertex buffer element */
-struct pipe_motionvector
+enum pipe_mpeg12_field_select
 {
-   struct {
-      short x, y;
-      ushort field_select; /**< enum pipe_video_field_select */
-      ushort weight;  /**< enum pipe_video_mv_weight  */
-   } top, bottom;
+   PIPE_MPEG12_FS_FIRST_FORWARD = 0x01,
+   PIPE_MPEG12_FS_FIRST_BACKWARD = 0x02,
+   PIPE_MPEG12_FS_SECOND_FORWARD = 0x04,
+   PIPE_MPEG12_FS_SECOND_BACKWARD = 0x08
 };
 
-/* bitfields because this is used as a vertex buffer element */
-struct pipe_ycbcr_block
+struct pipe_picture_desc
 {
-   ubyte x, y;
-   ubyte intra;  /**< enum pipe_mpeg12_dct_intra */
-   ubyte coding; /**< enum pipe_mpeg12_dct_type */
+   enum pipe_video_profile profile;
 };
 
-struct pipe_picture_desc
+struct pipe_macroblock
 {
-   enum pipe_video_profile profile;
+   enum pipe_video_codec codec;
 };
 
 struct pipe_mpeg12_picture_desc
@@ -118,6 +114,46 @@ struct pipe_mpeg12_picture_desc
    unsigned f_code[2][2];
 };
 
+struct pipe_mpeg12_macroblock
+{
+   struct pipe_macroblock base;
+
+   /* see section 6.3.17 in the spec */
+   unsigned short macroblock_address;
+
+   /* see section 6.3.17.1 in the spec */
+   unsigned char macroblock_type;
+
+   union {
+      struct {
+         /* see table 6-17 in the spec */
+         unsigned int frame_motion_type:2;
+
+         /* see table 6-18 in the spec */
+         unsigned int field_motion_type:2;
+
+         /* see table 6-19 in the spec */
+         unsigned int dct_type:1;
+      } bits;
+      unsigned int value;
+   } macroblock_modes;
+
+   /* see section 6.3.17.2 in the spec */
+   unsigned char motion_vertical_field_select;
+
+   /* see Table 7-7 in the spec */
+   short PMV[2][2][2];
+
+   /* see figure 6.10-12 in the spec */
+   unsigned short coded_block_pattern;
+
+   /* see figure 6.10-12 in the spec */
+   short *blocks;
+
+   /* Number of skipped macroblocks after this macroblock */
+   unsigned short num_skipped_macroblocks;
+};
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/src/gallium/state_trackers/vdpau/decode.c b/src/gallium/state_trackers/vdpau/decode.c
index 3527f73..5ca40f7 100644
--- a/src/gallium/state_trackers/vdpau/decode.c
+++ b/src/gallium/state_trackers/vdpau/decode.c
@@ -184,7 +184,6 @@ vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder,
    struct pipe_mpeg12_picture_desc picture;
    struct pipe_video_buffer *ref_frames[2];
    uint8_t intra_quantizer_matrix[64];
-   unsigned num_ycbcr_blocks[3] = { 0, 0, 0 };
    unsigned i;
 
    VDPAU_MSG(VDPAU_TRACE, "[VDPAU] Decoding MPEG2\n");
@@ -232,9 +231,9 @@ vlVdpDecoderRenderMpeg12(struct pipe_video_decoder *decoder,
 
    for (i = 0; i < bitstream_buffer_count; ++i)
       decoder->decode_bitstream(decoder, bitstream_buffers[i].bitstream_bytes,
-                                bitstream_buffers[i].bitstream, num_ycbcr_blocks);
+                                bitstream_buffers[i].bitstream);
 
-   decoder->end_frame(decoder, num_ycbcr_blocks);
+   decoder->end_frame(decoder);
 
    return VDP_STATUS_OK;
 }
diff --git a/src/gallium/state_trackers/xorg/xvmc/context.c b/src/gallium/state_trackers/xorg/xvmc/context.c
index f21ebda..0aa1a01 100644
--- a/src/gallium/state_trackers/xorg/xvmc/context.c
+++ b/src/gallium/state_trackers/xorg/xvmc/context.c
@@ -285,6 +285,7 @@ Status XvMCCreateContext(Display *dpy, XvPortID port, int surface_type_id,
    context_priv->vctx = vctx;
    context_priv->subpicture_max_width = subpic_max_w;
    context_priv->subpicture_max_height = subpic_max_h;
+   context_priv->width_in_macroblocks = align(width, 16) / 16;
 
    context->context_id = XAllocID(dpy);
    context->surface_type_id = surface_type_id;
diff --git a/src/gallium/state_trackers/xorg/xvmc/surface.c b/src/gallium/state_trackers/xorg/xvmc/surface.c
index 002c35a..6126940 100644
--- a/src/gallium/state_trackers/xorg/xvmc/surface.c
+++ b/src/gallium/state_trackers/xorg/xvmc/surface.c
@@ -42,12 +42,6 @@
 
 #include "xvmc_private.h"
 
-static const unsigned const_empty_block_mask_420[3][2][2] = {
-   { { 0x20, 0x10 },  { 0x08, 0x04 } },
-   { { 0x02, 0x02 },  { 0x02, 0x02 } },
-   { { 0x01, 0x01 },  { 0x01, 0x01 } }
-};
-
 static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
 {
    switch (xvmc_pic) {
@@ -66,187 +60,56 @@ static enum pipe_mpeg12_picture_type PictureToPipe(int xvmc_pic)
    return -1;
 }
 
-static inline void
-MacroBlockTypeToPipeWeights(const XvMCMacroBlock *xvmc_mb, unsigned weights[2])
-{
-   assert(xvmc_mb);
-
-   switch (xvmc_mb->macroblock_type & (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD)) {
-   case XVMC_MB_TYPE_MOTION_FORWARD:
-      weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
-      weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
-      break;
-
-   case (XVMC_MB_TYPE_MOTION_FORWARD | XVMC_MB_TYPE_MOTION_BACKWARD):
-      weights[0] = PIPE_VIDEO_MV_WEIGHT_HALF;
-      weights[1] = PIPE_VIDEO_MV_WEIGHT_HALF;
-      break;
-
-   case XVMC_MB_TYPE_MOTION_BACKWARD:
-      weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
-      weights[1] = PIPE_VIDEO_MV_WEIGHT_MAX;
-      break;
-
-   default:
-      /* workaround for xines xxmc video out plugin */
-      if (!(xvmc_mb->macroblock_type & ~XVMC_MB_TYPE_PATTERN)) {
-         weights[0] = PIPE_VIDEO_MV_WEIGHT_MAX;
-         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
-      } else {
-         weights[0] = PIPE_VIDEO_MV_WEIGHT_MIN;
-         weights[1] = PIPE_VIDEO_MV_WEIGHT_MIN;
-      }
-      break;
-   }
-}
-
-static inline struct pipe_motionvector
-MotionVectorToPipe(const XvMCMacroBlock *xvmc_mb, unsigned vector,
-                   unsigned field_select_mask, unsigned weight)
-{
-   struct pipe_motionvector mv;
-
-   assert(xvmc_mb);
-
-   switch (xvmc_mb->motion_type) {
-   case XVMC_PREDICTION_FRAME:
-      mv.top.x = xvmc_mb->PMV[0][vector][0];
-      mv.top.y = xvmc_mb->PMV[0][vector][1];
-      mv.top.field_select = PIPE_VIDEO_FRAME;
-      mv.top.weight = weight;
-
-      mv.bottom.x = xvmc_mb->PMV[0][vector][0];
-      mv.bottom.y = xvmc_mb->PMV[0][vector][1];
-      mv.bottom.weight = weight;
-      mv.bottom.field_select = PIPE_VIDEO_FRAME;
-      break;
-
-   case XVMC_PREDICTION_FIELD:
-      mv.top.x = xvmc_mb->PMV[0][vector][0];
-      mv.top.y = xvmc_mb->PMV[0][vector][1];
-      mv.top.field_select = (xvmc_mb->motion_vertical_field_select & field_select_mask) ?
-         PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
-      mv.top.weight = weight;
-
-      mv.bottom.x = xvmc_mb->PMV[1][vector][0];
-      mv.bottom.y = xvmc_mb->PMV[1][vector][1];
-      mv.bottom.field_select = (xvmc_mb->motion_vertical_field_select & (field_select_mask << 2)) ?
-         PIPE_VIDEO_BOTTOM_FIELD : PIPE_VIDEO_TOP_FIELD;
-      mv.bottom.weight = weight;
-      break;
-
-   default: // TODO: Support DUALPRIME and 16x8
-      break;
-   }
-
-   return mv;
-}
-
-static inline void
-UploadYcbcrBlocks(XvMCSurfacePrivate *surface,
-                  const XvMCMacroBlock *xvmc_mb,
-                  const XvMCBlockArray *xvmc_blocks)
-{
-   enum pipe_mpeg12_dct_intra intra;
-   enum pipe_mpeg12_dct_type coding;
-
-   unsigned tb, x, y, luma_blocks;
-   short *blocks;
-
-   assert(surface);
-   assert(xvmc_mb);
-
-   if (!xvmc_mb->coded_block_pattern)
-      return;
-
-   intra = xvmc_mb->macroblock_type & XVMC_MB_TYPE_INTRA ?
-           PIPE_MPEG12_DCT_INTRA : PIPE_MPEG12_DCT_DELTA;
-
-   coding = xvmc_mb->dct_type == XVMC_DCT_TYPE_FIELD ?
-            PIPE_MPEG12_DCT_TYPE_FIELD : PIPE_MPEG12_DCT_TYPE_FRAME;
-
-   blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
-
-   for (y = 0, luma_blocks = 0; y < 2; ++y) {
-      for (x = 0; x < 2; ++x, ++tb) {
-         if (xvmc_mb->coded_block_pattern & const_empty_block_mask_420[0][y][x]) {
-
-            struct pipe_ycbcr_block *stream = surface->ycbcr[0].stream;
-            stream->x = xvmc_mb->x * 2 + x;
-            stream->y = xvmc_mb->y * 2 + y;
-            stream->intra = intra;
-            stream->coding = coding;
-
-            surface->ycbcr[0].num_blocks_added++;
-            surface->ycbcr[0].stream++;
-
-            luma_blocks++;
-         }
-      }
-   }
-
-   if (luma_blocks > 0) {
-      memcpy(surface->ycbcr[0].buffer, blocks, BLOCK_SIZE_BYTES * luma_blocks);
-      surface->ycbcr[0].buffer += BLOCK_SIZE_SAMPLES * luma_blocks;
-      blocks += BLOCK_SIZE_SAMPLES * luma_blocks;
-   }
-
-   /* TODO: Implement 422, 444 */
-   //assert(ctx->base.chroma_format == PIPE_VIDEO_CHROMA_FORMAT_420);
-
-   for (tb = 1; tb < 3; ++tb) {
-      if (xvmc_mb->coded_block_pattern & const_empty_block_mask_420[tb][0][0]) {
-
-         struct pipe_ycbcr_block *stream = surface->ycbcr[tb].stream;
-         stream->x = xvmc_mb->x;
-         stream->y = xvmc_mb->y;
-         stream->intra = intra;
-         stream->coding = PIPE_MPEG12_DCT_TYPE_FRAME;
-
-         memcpy(surface->ycbcr[tb].buffer, blocks, BLOCK_SIZE_BYTES);
-
-         surface->ycbcr[tb].num_blocks_added++;
-         surface->ycbcr[tb].stream++;
-         surface->ycbcr[tb].buffer += BLOCK_SIZE_SAMPLES;
-         blocks += BLOCK_SIZE_SAMPLES;
-      }
-   }
-
-}
-
 static void
-MacroBlocksToPipe(XvMCSurfacePrivate *surface,
+MacroBlocksToPipe(XvMCContextPrivate *context,
+                  XvMCSurfacePrivate *surface,
                   unsigned int xvmc_picture_structure,
                   const XvMCMacroBlock *xvmc_mb,
                   const XvMCBlockArray *xvmc_blocks,
                   unsigned int num_macroblocks)
 {
-   unsigned int i, j;
+   struct pipe_mpeg12_macroblock mb;
+   unsigned int i, j, k;
 
    assert(xvmc_mb);
    assert(xvmc_blocks);
    assert(num_macroblocks);
 
-   for (i = 0; i < num_macroblocks; ++i) {
-      unsigned mv_pos = xvmc_mb->x + surface->mv_stride * xvmc_mb->y;
-      unsigned mv_weights[2];
-
-      if (xvmc_mb->macroblock_type & (XVMC_MB_TYPE_PATTERN | XVMC_MB_TYPE_INTRA))
-         UploadYcbcrBlocks(surface, xvmc_mb, xvmc_blocks);
+   mb.base.codec = PIPE_VIDEO_CODEC_MPEG12;
+   for (; num_macroblocks > 0; --num_macroblocks) {
+      mb.macroblock_address = xvmc_mb->x + context->width_in_macroblocks * xvmc_mb->y;
+      mb.macroblock_type = xvmc_mb->macroblock_type;
 
-      MacroBlockTypeToPipeWeights(xvmc_mb, mv_weights);
+      switch (xvmc_picture_structure) {
+      case XVMC_FRAME_PICTURE:
+         mb.macroblock_modes.bits.frame_motion_type = xvmc_mb->motion_type;
+         mb.macroblock_modes.bits.field_motion_type = 0;
+         break;
 
-      for (j = 0; j < 2; ++j) {
-         if (!surface->ref[j].mv) continue;
+      case XVMC_TOP_FIELD:
+      case XVMC_BOTTOM_FIELD:
+         mb.macroblock_modes.bits.frame_motion_type = 0;
+         mb.macroblock_modes.bits.field_motion_type = xvmc_mb->motion_type;
+         break;
 
-         surface->ref[j].mv[mv_pos] = MotionVectorToPipe
-         (
-            xvmc_mb, j,
-            j ? XVMC_SELECT_FIRST_BACKWARD : XVMC_SELECT_FIRST_FORWARD,
-            mv_weights[j]
-         );
+      default:
+         assert(0);
       }
 
+      mb.macroblock_modes.bits.dct_type = xvmc_mb->dct_type;
+      mb.motion_vertical_field_select = xvmc_mb->motion_vertical_field_select;
+
+      for (i = 0; i < 2; ++i)
+         for (j = 0; j < 2; ++j)
+            for (k = 0; k < 2; ++k)
+               mb.PMV[i][j][k] = xvmc_mb->PMV[i][j][k];
+
+      mb.coded_block_pattern = xvmc_mb->coded_block_pattern;
+      mb.blocks = xvmc_blocks->blocks + xvmc_mb->index * BLOCK_SIZE_SAMPLES;
+      mb.num_skipped_macroblocks = 0;
+
+      context->decoder->decode_macroblock(context->decoder, &mb.base);
+
       ++xvmc_mb;
    }
 }
@@ -270,8 +133,8 @@ SetDecoderStatus(XvMCSurfacePrivate *surface)
    decoder->set_decode_target(decoder, surface->video_buffer);
 
    for (i = 0; i < 2; ++i) {
-      if (surface->ref[i].surface) {
-         XvMCSurfacePrivate *ref = surface->ref[i].surface->privData;
+      if (surface->ref[i]) {
+         XvMCSurfacePrivate *ref = surface->ref[i]->privData;
 
          if (ref)
             ref_frames[num_refs++] = ref->video_buffer;
@@ -284,21 +147,21 @@ static void
 RecursiveEndFrame(XvMCSurfacePrivate *surface)
 {
    XvMCContextPrivate *context_priv;
-   unsigned i, num_ycbcr_blocks[3];
+   unsigned i;
 
    assert(surface);
 
    context_priv = surface->context->privData;
 
    for ( i = 0; i < 2; ++i ) {
-      if (surface->ref[i].surface) {
-         XvMCSurface *ref = surface->ref[i].surface;
+      if (surface->ref[i]) {
+         XvMCSurface *ref = surface->ref[i];
 
          assert(ref);
 
-         surface->ref[i].surface = NULL;
+         surface->ref[i] = NULL;
          RecursiveEndFrame(ref->privData);
-         surface->ref[i].surface = ref;
+         surface->ref[i] = ref;
       }
    }
 
@@ -306,13 +169,10 @@ RecursiveEndFrame(XvMCSurfacePrivate *surface)
       surface->frame_started = 0;
       SetDecoderStatus(surface);
 
-      for (i = 0; i < 3; ++i)
-         num_ycbcr_blocks[i] = surface->ycbcr[i].num_blocks_added;
-
       for (i = 0; i < 2; ++i)
-         surface->ref[i].surface = NULL;
+         surface->ref[i] = NULL;
 
-      context_priv->decoder->end_frame(context_priv->decoder, num_ycbcr_blocks);
+      context_priv->decoder->end_frame(context_priv->decoder);
    }
 }
 
@@ -389,8 +249,6 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
    XvMCSurfacePrivate *future_surface_priv;
    XvMCMacroBlock *xvmc_mb;
 
-   unsigned i;
-
    XVMC_MSG(XVMC_TRACE, "[XvMC] Rendering to surface %p, with past %p and future %p\n",
             target_surface, past_surface, future_surface);
 
@@ -443,40 +301,25 @@ Status XvMCRenderSurface(Display *dpy, XvMCContext *context, unsigned int pictur
 
    /* If the surface we're rendering hasn't changed the ref frames shouldn't change. */
    if (target_surface_priv->frame_started && (
-       target_surface_priv->ref[0].surface != past_surface ||
-       target_surface_priv->ref[1].surface != future_surface ||
+       target_surface_priv->ref[0] != past_surface ||
+       target_surface_priv->ref[1] != future_surface ||
        (xvmc_mb->x == 0 && xvmc_mb->y == 0))) {
 
       // If they change anyway we must assume that the current frame is ended
       RecursiveEndFrame(target_surface_priv);
    }
 
-   target_surface_priv->ref[0].surface = past_surface;
-   target_surface_priv->ref[1].surface = future_surface;
+   target_surface_priv->ref[0] = past_surface;
+   target_surface_priv->ref[1] = future_surface;
 
    SetDecoderStatus(target_surface_priv);
 
    if (!target_surface_priv->frame_started) {
-      decoder->begin_frame(decoder);
-
-      target_surface_priv->mv_stride = decoder->get_mv_stream_stride(decoder);
-      for (i = 0; i < 3; ++i) {
-         target_surface_priv->ycbcr[i].num_blocks_added = 0;
-         target_surface_priv->ycbcr[i].stream = decoder->get_ycbcr_stream(decoder, i);
-         target_surface_priv->ycbcr[i].buffer = decoder->get_ycbcr_buffer(decoder, i);
-      }
-
-      for (i = 0; i < 2; ++i) {
-         if (target_surface_priv->ref[i].surface)
-            target_surface_priv->ref[i].mv = decoder->get_mv_stream(decoder, i);
-         else
-            target_surface_priv->ref[i].mv = NULL;
-      }
-
       target_surface_priv->frame_started = 1;
+      decoder->begin_frame(decoder);
    }
 
-   MacroBlocksToPipe(target_surface_priv, picture_structure, xvmc_mb, blocks, num_macroblocks);
+   MacroBlocksToPipe(context_priv, target_surface_priv, picture_structure, xvmc_mb, blocks, num_macroblocks);
 
    XVMC_MSG(XVMC_TRACE, "[XvMC] Submitted surface %p for rendering.\n", target_surface);
 
@@ -665,8 +508,6 @@ Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
    XvMCSurfacePrivate *surface_priv;
    XvMCContextPrivate *context_priv;
 
-   unsigned num_ycbcr_buffers[3] = { 0, 0, 0 };
-
    XVMC_MSG(XVMC_TRACE, "[XvMC] Destroying surface %p.\n", surface);
 
    assert(dpy);
@@ -679,7 +520,7 @@ Status XvMCDestroySurface(Display *dpy, XvMCSurface *surface)
    
    if (surface_priv->frame_started) {
       SetDecoderStatus(surface_priv);
-      context_priv->decoder->end_frame(context_priv->decoder, num_ycbcr_buffers);
+      context_priv->decoder->end_frame(context_priv->decoder);
    }
    context_priv->decoder->destroy_buffer(context_priv->decoder, surface_priv->decode_buffer);
    surface_priv->video_buffer->destroy(surface_priv->video_buffer);
diff --git a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
index 5b3debd..b73f881 100644
--- a/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
+++ b/src/gallium/state_trackers/xorg/xvmc/xvmc_private.h
@@ -61,6 +61,7 @@ typedef struct
 
    unsigned short subpicture_max_width;
    unsigned short subpicture_max_height;
+   unsigned width_in_macroblocks;
 
    struct pipe_video_rect dst_rect;
    struct pipe_surface *drawable_surface;
@@ -75,17 +76,7 @@ typedef struct
    // have we allready told the decoder to start a frame
    bool frame_started;
 
-   struct {
-      unsigned num_blocks_added;
-      struct pipe_ycbcr_block *stream;
-      short *buffer;
-   } ycbcr[3];
-
-   unsigned mv_stride;
-   struct {
-      XvMCSurface *surface;
-      struct pipe_motionvector *mv;
-   } ref[2];
+   XvMCSurface *ref[2];
 
    struct pipe_fence_handle *fence;
 
-- 
1.7.4.1

