[Mesa-dev] [PATCH v2 1/2] st/va: if h264 then delay decoder creation until max_references is known
Christian König
christian.koenig at amd.com
Wed Nov 25 01:51:32 PST 2015
On 25.11.2015 10:12, Julien Isorce wrote:
> For the commit message please read:
>
> "HEVC case is left unchanged since delaying decoder creation is not
> needed on AMD hardware."
In this case please update the commit message, but honestly I'm not sure;
we might well use max_references somewhere in the DPB calculation for HEVC
as well.
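To illustrate why that matters, here is a purely hypothetical sketch (not
taken from radeon/UVD or any other driver): if the DPB size is derived from
max_references, then the value the decoder is created with directly changes
how much memory gets allocated.

/* Hypothetical helper, for illustration only: a DPB sizing calculation
 * that depends on max_references.  Real drivers also account for codec
 * specific alignment, tiling and metadata. */
static unsigned example_hevc_dpb_size(unsigned width, unsigned height,
                                      unsigned max_references)
{
   /* one surface per reference picture plus the picture being decoded */
   unsigned num_pictures = max_references + 1;

   /* NV12: full size luma plane plus a half size chroma plane */
   unsigned picture_size = width * height * 3 / 2;

   return num_pictures * picture_size;
}

With a calculation like that, creating the decoder with max_references based
on num_render_targets instead of the real reference count would not be
purely cosmetic.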
Some more comments below.
>
> instead of
>
> "XXX: do the same for HEVC"
>
> On 25 November 2015 at 09:07, Julien Isorce <julien.isorce at gmail.com> wrote:
>
> From: Julien Isorce <julien.isorce at gmail.com>
>
> In general max_references cannot be based on num_render_targets.
>
> This patch allows allocating accurately sized buffers.
> For other codecs max_references stays fixed at 2.
>
> This behaviour is similar to that of the vaapi/vdpau-driver.
>
> XXX: do the same for HEVC
>
> Signed-off-by: Julien Isorce <j.isorce at samsung.com>
> ---
>  src/gallium/state_trackers/va/context.c      | 41 ++++++++++++++--------------
>  src/gallium/state_trackers/va/picture.c      | 37 ++++++++++++++++++-------
>  src/gallium/state_trackers/va/picture_h264.c | 29 +++++++++++++++++++-
>  src/gallium/state_trackers/va/va_private.h   |  4 +--
>  4 files changed, 78 insertions(+), 33 deletions(-)
>
> diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c
> index f0051e5..985007b 100644
> --- a/src/gallium/state_trackers/va/context.c
> +++ b/src/gallium/state_trackers/va/context.c
> @@ -187,7 +187,6 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> int picture_height, int flag, VASurfaceID *render_targets,
> int num_render_targets, VAContextID *context_id)
> {
> - struct pipe_video_codec templat = {};
> vlVaDriver *drv;
> vlVaContext *context;
> int is_vpp;
> @@ -213,27 +212,28 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> return VA_STATUS_ERROR_INVALID_CONTEXT;
> }
> } else {
> - templat.profile = config_id;
> - templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
> - templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
> - templat.width = picture_width;
> - templat.height = picture_height;
> - templat.max_references = num_render_targets;
> - templat.expect_chunked_decode = true;
> -
> - if (u_reduce_video_profile(templat.profile) ==
> - PIPE_VIDEO_FORMAT_MPEG4_AVC)
> -         templat.level = u_get_h264_level(templat.width, templat.height,
> - &templat.max_references);
> -
> -      context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
> - if (!context->decoder) {
> - FREE(context);
> - return VA_STATUS_ERROR_ALLOCATION_FAILED;
> + context->templat.profile = config_id;
> + context->templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
> + context->templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
> + context->templat.width = picture_width;
> + context->templat.height = picture_height;
> + context->templat.max_references = 2;
> + context->templat.expect_chunked_decode = true;
> +
> +      /* Can only create decoders for which max_references is known. */
> + if (u_reduce_video_profile(context->templat.profile) !=
> + PIPE_VIDEO_FORMAT_MPEG4_AVC) {
> + context->decoder = drv->pipe->create_video_codec(drv->pipe,
> + &context->templat);
> + if (!context->decoder) {
> + FREE(context);
> + return VA_STATUS_ERROR_ALLOCATION_FAILED;
> + }
> }
>
> - if (u_reduce_video_profile(context->decoder->profile) ==
> + if (u_reduce_video_profile(context->templat.profile) ==
> PIPE_VIDEO_FORMAT_MPEG4_AVC) {
>
Please join this check with the one above, maybe even make this a switch
statement.
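Something along these lines, an untested sketch that just reuses the hunks
from your patch to show the structure I mean (the sps allocation and the
exact HEVC ordering are only illustrative):

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      /* Delay decoder creation until max_references is known. */
      context->templat.max_references = 0;
      context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
      if (!context->desc.h264.pps) {
         FREE(context);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }
      /* sps allocation and error handling as in the existing code */
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      context->templat.max_references = num_render_targets;
      context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
      if (!context->desc.h265.pps) {
         FREE(context);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }
      /* sps allocation as in the existing code, then fall through
       * so the decoder still gets created here */
   default:
      context->decoder = drv->pipe->create_video_codec(drv->pipe,
                                                        &context->templat);
      if (!context->decoder) {
         FREE(context);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }
      break;
   }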
Apart from that looks good to me.
> + context->templat.max_references = 0;
> context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
> if (!context->desc.h264.pps) {
> FREE(context);
> @@ -247,8 +247,9 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> }
> }
>
> - if (u_reduce_video_profile(context->decoder->profile) ==
> + if (u_reduce_video_profile(context->templat.profile) ==
> PIPE_VIDEO_FORMAT_HEVC) {
> + context->templat.max_references = num_render_targets;
> context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
> if (!context->desc.h265.pps) {
> FREE(context);
> diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
> index 25d2940..e80873b 100644
> --- a/src/gallium/state_trackers/va/picture.c
> +++ b/src/gallium/state_trackers/va/picture.c
> @@ -60,6 +60,12 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
>
> context->target = surf->buffer;
> if (!context->decoder) {
> + /* Decoder creation is delayed until max_references is set. */
> + if (u_reduce_video_profile(context->templat.profile) ==
> + PIPE_VIDEO_FORMAT_MPEG4_AVC)
> + return context->templat.max_references == 0 ?
> + VA_STATUS_SUCCESS : VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> /* VPP */
> if ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
> context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
> @@ -67,6 +73,7 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
> context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM) ||
> context->target->interlaced)
> return VA_STATUS_ERROR_UNIMPLEMENTED;
> +
> return VA_STATUS_SUCCESS;
> }
>
> @@ -86,16 +93,18 @@ vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
> *ref_frame = NULL;
> }
>
> -static void
> +static VAStatus
> handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> {
> - switch (u_reduce_video_profile(context->decoder->profile)) {
> + VAStatus vaStatus = VA_STATUS_SUCCESS;
> +
> + switch (u_reduce_video_profile(context->templat.profile)) {
> case PIPE_VIDEO_FORMAT_MPEG12:
> vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
> break;
>
> case PIPE_VIDEO_FORMAT_MPEG4_AVC:
> - vlVaHandlePictureParameterBufferH264(drv, context, buf);
> +      vaStatus = vlVaHandlePictureParameterBufferH264(drv, context, buf);
> break;
>
> case PIPE_VIDEO_FORMAT_VC1:
> @@ -113,12 +122,14 @@ handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *
> default:
> break;
> }
> +
> + return vaStatus;
> }
>
> static void
> handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
> {
> - switch (u_reduce_video_profile(context->decoder->profile)) {
> + switch (u_reduce_video_profile(context->templat.profile)) {
> case PIPE_VIDEO_FORMAT_MPEG12:
> vlVaHandleIQMatrixBufferMPEG12(context, buf);
> break;
> @@ -142,7 +153,7 @@ handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
> static void
> handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
> {
> - switch (u_reduce_video_profile(context->decoder->profile)) {
> + switch (u_reduce_video_profile(context->templat.profile)) {
> case PIPE_VIDEO_FORMAT_MPEG4_AVC:
> vlVaHandleSliceParameterBufferH264(context, buf);
> break;
> @@ -178,8 +189,8 @@ bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
> return 0;
> }
>
> -static void
> -handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
> +static VAStatus
> +handleVASliceDataBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> {
> enum pipe_video_format format;
> unsigned num_buffers = 0;
> @@ -189,7 +200,7 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
> static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
> static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
>
> - format = u_reduce_video_profile(context->decoder->profile);
> + format = u_reduce_video_profile(context->templat.profile);
> switch (format) {
> case PIPE_VIDEO_FORMAT_MPEG4_AVC:
> if (bufHasStartcode(buf, 0x000001, 24))
> @@ -232,6 +243,8 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
> ++num_buffers;
> context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
> num_buffers, (const void * const*)buffers, sizes);
> +
> + return VA_STATUS_SUCCESS;
> }
>
> VAStatus
> @@ -261,7 +274,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
>
> switch (buf->type) {
> case VAPictureParameterBufferType:
> - handlePictureParameterBuffer(drv, context, buf);
> + vaStatus = handlePictureParameterBuffer(drv, context, buf);
> break;
>
> case VAIQMatrixBufferType:
> @@ -273,7 +286,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
> break;
>
> case VASliceDataBufferType:
> - handleVASliceDataBufferType(context, buf);
> + vaStatus = handleVASliceDataBufferType(drv, context, buf);
> break;
> case VAProcPipelineParameterBufferType:
> vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
> @@ -305,6 +318,10 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
> return VA_STATUS_ERROR_INVALID_CONTEXT;
>
> if (!context->decoder) {
> + if (u_reduce_video_profile(context->templat.profile) ==
> + PIPE_VIDEO_FORMAT_MPEG4_AVC)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> /* VPP */
> return VA_STATUS_SUCCESS;
> }
> diff --git a/src/gallium/state_trackers/va/picture_h264.c b/src/gallium/state_trackers/va/picture_h264.c
> index bd6c8a0..e9a8825 100644
> --- a/src/gallium/state_trackers/va/picture_h264.c
> +++ b/src/gallium/state_trackers/va/picture_h264.c
> @@ -26,9 +26,10 @@
> *
> **************************************************************************/
>
> +#include "util/u_video.h"
> #include "va_private.h"
>
> -void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> +VAStatus vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> {
> VAPictureParameterBufferH264 *h264 = buf->data;
>
> @@ -90,6 +91,32 @@ void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context,
> h264->pic_fields.bits.redundant_pic_cnt_present_flag;
> /*reference_pic_flag*/
> context->desc.h264.frame_num = h264->frame_num;
> +
> + if (!context->decoder && context->desc.h264.num_ref_frames > 0)
> +      context->templat.max_references = MIN2(context->desc.h264.num_ref_frames, 16);
> +
> + /* Create the decoder once max_references is known. */
> + if (!context->decoder) {
> + if (!context->target)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> + if (context->templat.max_references == 0)
> + return VA_STATUS_ERROR_INVALID_BUFFER;
> +
> +      context->templat.level = u_get_h264_level(context->templat.width,
> + context->templat.height, &context->templat.max_references);
> +
> + context->decoder = drv->pipe->create_video_codec(drv->pipe,
> + &context->templat);
> +
> + if (!context->decoder)
> + return VA_STATUS_ERROR_ALLOCATION_FAILED;
> +
> + context->decoder->begin_frame(context->decoder, context->target,
> + &context->desc.base);
> + }
> +
> + return VA_STATUS_SUCCESS;
> }
>
> void vlVaHandleIQMatrixBufferH264(vlVaContext *context, vlVaBuffer *buf)
> diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h
> index ff1b9bd..cf9b29d 100644
> --- a/src/gallium/state_trackers/va/va_private.h
> +++ b/src/gallium/state_trackers/va/va_private.h
> @@ -215,7 +215,7 @@ typedef struct {
> } vlVaSubpicture;
>
> typedef struct {
> - struct pipe_video_codec *decoder;
> + struct pipe_video_codec templat, *decoder;
> struct pipe_video_buffer *target;
> union {
> struct pipe_picture_desc base;
> @@ -353,7 +353,7 @@ VAStatus vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContex
> void vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id, struct pipe_video_buffer **ref_frame);
> void vlVaHandlePictureParameterBufferMPEG12(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);
> void vlVaHandleIQMatrixBufferMPEG12(vlVaContext *context, vlVaBuffer *buf);
> -void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);
> +VAStatus vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);
> void vlVaHandleIQMatrixBufferH264(vlVaContext *context, vlVaBuffer *buf);
> void vlVaHandleSliceParameterBufferH264(vlVaContext *context, vlVaBuffer *buf);
> void vlVaHandlePictureParameterBufferVC1(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);
> --
> 1.9.1
>
>