[Mesa-dev] [PATCH v3] st/va: delay decoder creation until max_references is known
Christian König
christian.koenig at amd.com
Mon Nov 30 01:50:07 PST 2015
On 26.11.2015 09:45, Julien Isorce wrote:
> In general, max_references cannot be based on num_render_targets.
>
> This patch allows allocating buffers with an accurate size,
> i.e. no larger than necessary. For other codecs it is a fixed
> value of 2.
>
> This is similar behaviour to the vaapi/vdpau-driver.
>
> For now the HEVC case defaults to num_render_targets as before.
> But it could also benefit from this change by setting a more
> accurate max_references number in handlePictureParameterBuffer.
>
> Signed-off-by: Julien Isorce <j.isorce at samsung.com>
Reviewed-by: Christian König <christian.koenig at amd.com>
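
For reference, the new flow in condensed form (an illustrative sketch
only, not part of the patch; the names are taken from the diff below
and error handling is elided):

   /* vlVaCreateContext: only fill in the codec template, do not
    * create the decoder yet.  For H.264 max_references stays 0
    * until the picture parameter buffer arrives. */
   context->templat.profile = config_id;
   context->templat.width = picture_width;
   context->templat.height = picture_height;
   context->templat.max_references = 0;

   /* handlePictureParameterBuffer: the codec specific handler has
    * filled in max_references (e.g. from num_ref_frames for H.264),
    * so the decoder can now be created with buffers sized for the
    * actual number of reference frames. */
   if (!context->decoder && context->templat.max_references > 0) {
      context->decoder = drv->pipe->create_video_codec(drv->pipe,
                                                       &context->templat);
      context->decoder->begin_frame(context->decoder, context->target,
                                    &context->desc.base);
   }
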
> ---
> src/gallium/state_trackers/va/context.c | 48 +++++++++++++--------------
> src/gallium/state_trackers/va/picture.c | 49 +++++++++++++++++++++++-----
> src/gallium/state_trackers/va/picture_h264.c | 4 +++
> src/gallium/state_trackers/va/va_private.h | 2 +-
> 4 files changed, 69 insertions(+), 34 deletions(-)
>
> diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c
> index f0051e5..192794f 100644
> --- a/src/gallium/state_trackers/va/context.c
> +++ b/src/gallium/state_trackers/va/context.c
> @@ -187,7 +187,6 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> int picture_height, int flag, VASurfaceID *render_targets,
> int num_render_targets, VAContextID *context_id)
> {
> - struct pipe_video_codec templat = {};
> vlVaDriver *drv;
> vlVaContext *context;
> int is_vpp;
> @@ -213,27 +212,22 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> return VA_STATUS_ERROR_INVALID_CONTEXT;
> }
> } else {
> - templat.profile = config_id;
> - templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
> - templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
> - templat.width = picture_width;
> - templat.height = picture_height;
> - templat.max_references = num_render_targets;
> - templat.expect_chunked_decode = true;
> -
> - if (u_reduce_video_profile(templat.profile) ==
> - PIPE_VIDEO_FORMAT_MPEG4_AVC)
> - templat.level = u_get_h264_level(templat.width, templat.height,
> - &templat.max_references);
> -
> - context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
> - if (!context->decoder) {
> - FREE(context);
> - return VA_STATUS_ERROR_ALLOCATION_FAILED;
> - }
> -
> - if (u_reduce_video_profile(context->decoder->profile) ==
> - PIPE_VIDEO_FORMAT_MPEG4_AVC) {
> + context->templat.profile = config_id;
> + context->templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
> + context->templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
> + context->templat.width = picture_width;
> + context->templat.height = picture_height;
> + context->templat.expect_chunked_decode = true;
> +
> + switch (u_reduce_video_profile(context->templat.profile)) {
> + case PIPE_VIDEO_FORMAT_MPEG12:
> + case PIPE_VIDEO_FORMAT_VC1:
> + case PIPE_VIDEO_FORMAT_MPEG4:
> + context->templat.max_references = 2;
> + break;
> +
> + case PIPE_VIDEO_FORMAT_MPEG4_AVC:
> + context->templat.max_references = 0;
> context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
> if (!context->desc.h264.pps) {
> FREE(context);
> @@ -245,10 +239,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> FREE(context);
> return VA_STATUS_ERROR_ALLOCATION_FAILED;
> }
> - }
> + break;
>
> - if (u_reduce_video_profile(context->decoder->profile) ==
> - PIPE_VIDEO_FORMAT_HEVC) {
> + case PIPE_VIDEO_FORMAT_HEVC:
> + context->templat.max_references = num_render_targets;
> context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
> if (!context->desc.h265.pps) {
> FREE(context);
> @@ -260,6 +254,10 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> FREE(context);
> return VA_STATUS_ERROR_ALLOCATION_FAILED;
> }
> + break;
> +
> + default:
> + break;
> }
> }
>
> diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
> index 25d2940..8d938f4 100644
> --- a/src/gallium/state_trackers/va/picture.c
> +++ b/src/gallium/state_trackers/va/picture.c
> @@ -59,14 +59,17 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
> return VA_STATUS_ERROR_INVALID_SURFACE;
>
> context->target = surf->buffer;
> +
> if (!context->decoder) {
> /* VPP */
> - if ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
> + if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
> + ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
> context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
> context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
> context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM) ||
> - context->target->interlaced)
> + context->target->interlaced))
> return VA_STATUS_ERROR_UNIMPLEMENTED;
> +
> return VA_STATUS_SUCCESS;
> }
>
> @@ -86,10 +89,12 @@ vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
> *ref_frame = NULL;
> }
>
> -static void
> +static VAStatus
> handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> {
> - switch (u_reduce_video_profile(context->decoder->profile)) {
> + VAStatus vaStatus = VA_STATUS_SUCCESS;
> +
> + switch (u_reduce_video_profile(context->templat.profile)) {
> case PIPE_VIDEO_FORMAT_MPEG12:
> vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
> break;
> @@ -113,12 +118,37 @@ handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *
> default:
> break;
> }
> +
> + /* Create the decoder once max_references is known. */
> + if (!context->decoder) {
> + if (!context->target)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> + if (context->templat.max_references == 0)
> + return VA_STATUS_ERROR_INVALID_BUFFER;
> +
> +      if (u_reduce_video_profile(context->templat.profile) ==
> + PIPE_VIDEO_FORMAT_MPEG4_AVC)
> + context->templat.level = u_get_h264_level(context->templat.width,
> + context->templat.height, &context->templat.max_references);
> +
> + context->decoder = drv->pipe->create_video_codec(drv->pipe,
> + &context->templat);
> +
> + if (!context->decoder)
> + return VA_STATUS_ERROR_ALLOCATION_FAILED;
> +
> + context->decoder->begin_frame(context->decoder, context->target,
> + &context->desc.base);
> + }
> +
> + return vaStatus;
> }
>
> static void
> handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
> {
> - switch (u_reduce_video_profile(context->decoder->profile)) {
> + switch (u_reduce_video_profile(context->templat.profile)) {
> case PIPE_VIDEO_FORMAT_MPEG12:
> vlVaHandleIQMatrixBufferMPEG12(context, buf);
> break;
> @@ -142,7 +172,7 @@ handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
> static void
> handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
> {
> - switch (u_reduce_video_profile(context->decoder->profile)) {
> + switch (u_reduce_video_profile(context->templat.profile)) {
> case PIPE_VIDEO_FORMAT_MPEG4_AVC:
> vlVaHandleSliceParameterBufferH264(context, buf);
> break;
> @@ -189,7 +219,7 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
> static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
> static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
>
> - format = u_reduce_video_profile(context->decoder->profile);
> + format = u_reduce_video_profile(context->templat.profile);
> switch (format) {
> case PIPE_VIDEO_FORMAT_MPEG4_AVC:
> if (bufHasStartcode(buf, 0x000001, 24))
> @@ -261,7 +291,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
>
> switch (buf->type) {
> case VAPictureParameterBufferType:
> - handlePictureParameterBuffer(drv, context, buf);
> + vaStatus = handlePictureParameterBuffer(drv, context, buf);
> break;
>
> case VAIQMatrixBufferType:
> @@ -305,6 +335,9 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
> return VA_STATUS_ERROR_INVALID_CONTEXT;
>
> if (!context->decoder) {
> + if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> /* VPP */
> return VA_STATUS_SUCCESS;
> }
> diff --git a/src/gallium/state_trackers/va/picture_h264.c b/src/gallium/state_trackers/va/picture_h264.c
> index bd6c8a0..f6e5b70 100644
> --- a/src/gallium/state_trackers/va/picture_h264.c
> +++ b/src/gallium/state_trackers/va/picture_h264.c
> @@ -26,6 +26,7 @@
> *
> **************************************************************************/
>
> +#include "util/u_video.h"
> #include "va_private.h"
>
> void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> @@ -90,6 +91,9 @@ void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context,
> h264->pic_fields.bits.redundant_pic_cnt_present_flag;
> /*reference_pic_flag*/
> context->desc.h264.frame_num = h264->frame_num;
> +
> + if (!context->decoder && context->desc.h264.num_ref_frames > 0)
> + context->templat.max_references = MIN2(context->desc.h264.num_ref_frames, 16);
> }
>
> void vlVaHandleIQMatrixBufferH264(vlVaContext *context, vlVaBuffer *buf)
> diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h
> index ff1b9bd..6739efc 100644
> --- a/src/gallium/state_trackers/va/va_private.h
> +++ b/src/gallium/state_trackers/va/va_private.h
> @@ -215,7 +215,7 @@ typedef struct {
> } vlVaSubpicture;
>
> typedef struct {
> - struct pipe_video_codec *decoder;
> + struct pipe_video_codec templat, *decoder;
> struct pipe_video_buffer *target;
> union {
> struct pipe_picture_desc base;
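
From the application's point of view nothing changes: the decoder is
simply created later, during vaRenderPicture() of the picture parameter
buffer rather than in vaCreateContext(). A minimal sketch of the usual
libva call sequence (standard libva entry points; dpy, config, the
surfaces and the buffer IDs are assumed to have been set up by the
application beforehand):

   /* Context creation no longer creates the pipe decoder. */
   vaCreateContext(dpy, config, width, height, VA_PROGRESSIVE,
                   surfaces, num_surfaces, &context);

   vaBeginPicture(dpy, context, surfaces[0]);

   /* The picture parameter buffer carries num_ref_frames for H.264;
    * the driver derives max_references from it and creates the
    * decoder at this point. */
   vaRenderPicture(dpy, context, &pic_param_buf, 1);
   vaRenderPicture(dpy, context, &slice_param_buf, 1);
   vaRenderPicture(dpy, context, &slice_data_buf, 1);

   vaEndPicture(dpy, context);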