<div dir="ltr"><div>For commit message please read:<br><br></div>"HEVC case is left unchanged since delaying decoder creation is not needed on AMD hardware."<br><br><div>instead of <br><br>"XXX: do the same for HEVC"</div></div><div class="gmail_extra"><br><div class="gmail_quote">On 25 November 2015 at 09:07, Julien Isorce <span dir="ltr"><<a href="mailto:julien.isorce@gmail.com" target="_blank">julien.isorce@gmail.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">From: Julien Isorce <<a href="mailto:julien.isorce@gmail.com">julien.isorce@gmail.com</a>><br>
<br>
In general max_references cannot be based on num_render_targets.<br>
<br>
This patch allow to allocate accurate sizes for buffers.<br>
For other codecs it is a fixed value to 2.<br>
<br>
This is similar behaviour as vaapi/vdpau-driver.<br>
<br>
XXX: do the same for HEVC<br>
<br>
Signed-off-by: Julien Isorce <<a href="mailto:j.isorce@samsung.com">j.isorce@samsung.com</a>><br>
---
 src/gallium/state_trackers/va/context.c      | 41 ++++++++++++++--------------
 src/gallium/state_trackers/va/picture.c      | 37 ++++++++++++++++++-------
 src/gallium/state_trackers/va/picture_h264.c | 29 +++++++++++++++++++-
 src/gallium/state_trackers/va/va_private.h   |  4 +--
 4 files changed, 78 insertions(+), 33 deletions(-)
<br>
diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c<br>
index f0051e5..985007b 100644<br>
--- a/src/gallium/state_trackers/va/context.c<br>
+++ b/src/gallium/state_trackers/va/context.c<br>
@@ -187,7 +187,6 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,<br>
int picture_height, int flag, VASurfaceID *render_targets,<br>
int num_render_targets, VAContextID *context_id)<br>
{<br>
- struct pipe_video_codec templat = {};<br>
vlVaDriver *drv;<br>
vlVaContext *context;<br>
int is_vpp;<br>
@@ -213,27 +212,28 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,<br>
return VA_STATUS_ERROR_INVALID_CONTEXT;<br>
}<br>
} else {<br>
- templat.profile = config_id;<br>
- templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;<br>
- templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;<br>
- templat.width = picture_width;<br>
- templat.height = picture_height;<br>
- templat.max_references = num_render_targets;<br>
- templat.expect_chunked_decode = true;<br>
-<br>
- if (u_reduce_video_profile(templat.profile) ==<br>
- PIPE_VIDEO_FORMAT_MPEG4_AVC)<br>
- templat.level = u_get_h264_level(templat.width, templat.height,<br>
- &templat.max_references);<br>
-<br>
- context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);<br>
- if (!context->decoder) {<br>
- FREE(context);<br>
- return VA_STATUS_ERROR_ALLOCATION_FAILED;<br>
+ context->templat.profile = config_id;<br>
+ context->templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;<br>
+ context->templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;<br>
+ context->templat.width = picture_width;<br>
+ context->templat.height = picture_height;<br>
+ context->templat.max_references = 2;<br>
+ context->templat.expect_chunked_decode = true;<br>
+<br>
+ /* Can only create decoders for which max_references is known. */<br>
+ if (u_reduce_video_profile(context->templat.profile) !=<br>
+ PIPE_VIDEO_FORMAT_MPEG4_AVC) {<br>
+ context->decoder = drv->pipe->create_video_codec(drv->pipe,<br>
+ &context->templat);<br>
+ if (!context->decoder) {<br>
+ FREE(context);<br>
+ return VA_STATUS_ERROR_ALLOCATION_FAILED;<br>
+ }<br>
}<br>
<br>
- if (u_reduce_video_profile(context->decoder->profile) ==<br>
+ if (u_reduce_video_profile(context->templat.profile) ==<br>
PIPE_VIDEO_FORMAT_MPEG4_AVC) {<br>
+ context->templat.max_references = 0;<br>
context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);<br>
if (!context->desc.h264.pps) {<br>
FREE(context);<br>
@@ -247,8 +247,9 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,<br>
}<br>
}<br>
<br>
- if (u_reduce_video_profile(context->decoder->profile) ==<br>
+ if (u_reduce_video_profile(context->templat.profile) ==<br>
PIPE_VIDEO_FORMAT_HEVC) {<br>
+ context->templat.max_references = num_render_targets;<br>
context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);<br>
if (!context->desc.h265.pps) {<br>
FREE(context);<br>
diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c<br>
index 25d2940..e80873b 100644<br>
--- a/src/gallium/state_trackers/va/picture.c<br>
+++ b/src/gallium/state_trackers/va/picture.c<br>
@@ -60,6 +60,12 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende<br>
<br>
context->target = surf->buffer;<br>
if (!context->decoder) {<br>
+ /* Decoder creation is delayed until max_references is set. */<br>
+ if (u_reduce_video_profile(context->templat.profile) ==<br>
+ PIPE_VIDEO_FORMAT_MPEG4_AVC)<br>
+ return context->templat.max_references == 0 ?<br>
+ VA_STATUS_SUCCESS : VA_STATUS_ERROR_INVALID_CONTEXT;<br>
+<br>
/* VPP */<br>
if ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&<br>
context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&<br>
@@ -67,6 +73,7 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende<br>
context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM) ||<br>
context->target->interlaced)<br>
return VA_STATUS_ERROR_UNIMPLEMENTED;<br>
+<br>
return VA_STATUS_SUCCESS;<br>
}<br>
<br>
@@ -86,16 +93,18 @@ vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,<br>
*ref_frame = NULL;<br>
}<br>
<br>
-static void<br>
+static VAStatus<br>
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)<br>
{<br>
- switch (u_reduce_video_profile(context->decoder->profile)) {<br>
+ VAStatus vaStatus = VA_STATUS_SUCCESS;<br>
+<br>
+ switch (u_reduce_video_profile(context->templat.profile)) {<br>
case PIPE_VIDEO_FORMAT_MPEG12:<br>
vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);<br>
break;<br>
<br>
case PIPE_VIDEO_FORMAT_MPEG4_AVC:<br>
- vlVaHandlePictureParameterBufferH264(drv, context, buf);<br>
+ vaStatus = vlVaHandlePictureParameterBufferH264(drv, context, buf);<br>
break;<br>
<br>
case PIPE_VIDEO_FORMAT_VC1:<br>
@@ -113,12 +122,14 @@ handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *<br>
default:<br>
break;<br>
}<br>
+<br>
+ return vaStatus;<br>
}<br>
<br>
static void<br>
handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)<br>
{<br>
- switch (u_reduce_video_profile(context->decoder->profile)) {<br>
+ switch (u_reduce_video_profile(context->templat.profile)) {<br>
case PIPE_VIDEO_FORMAT_MPEG12:<br>
vlVaHandleIQMatrixBufferMPEG12(context, buf);<br>
break;<br>
@@ -142,7 +153,7 @@ handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)<br>
static void<br>
handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)<br>
{<br>
- switch (u_reduce_video_profile(context->decoder->profile)) {<br>
+ switch (u_reduce_video_profile(context->templat.profile)) {<br>
case PIPE_VIDEO_FORMAT_MPEG4_AVC:<br>
vlVaHandleSliceParameterBufferH264(context, buf);<br>
break;<br>
@@ -178,8 +189,8 @@ bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)<br>
return 0;<br>
}<br>
<br>
-static void<br>
-handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)<br>
+static VAStatus<br>
+handleVASliceDataBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)<br>
{<br>
enum pipe_video_format format;<br>
unsigned num_buffers = 0;<br>
@@ -189,7 +200,7 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)<br>
static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };<br>
static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };<br>
<br>
- format = u_reduce_video_profile(context->decoder->profile);<br>
+ format = u_reduce_video_profile(context->templat.profile);<br>
switch (format) {<br>
case PIPE_VIDEO_FORMAT_MPEG4_AVC:<br>
if (bufHasStartcode(buf, 0x000001, 24))<br>
@@ -232,6 +243,8 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)<br>
++num_buffers;<br>
context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,<br>
num_buffers, (const void * const*)buffers, sizes);<br>
+<br>
+ return VA_STATUS_SUCCESS;<br>
}<br>
<br>
VAStatus<br>
@@ -261,7 +274,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff<br>
<br>
switch (buf->type) {<br>
case VAPictureParameterBufferType:<br>
- handlePictureParameterBuffer(drv, context, buf);<br>
+ vaStatus = handlePictureParameterBuffer(drv, context, buf);<br>
break;<br>
<br>
case VAIQMatrixBufferType:<br>
@@ -273,7 +286,7 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff<br>
break;<br>
<br>
case VASliceDataBufferType:<br>
- handleVASliceDataBufferType(context, buf);<br>
+ vaStatus = handleVASliceDataBufferType(drv, context, buf);<br>
break;<br>
case VAProcPipelineParameterBufferType:<br>
vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);<br>
@@ -305,6 +318,10 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)<br>
return VA_STATUS_ERROR_INVALID_CONTEXT;<br>
<br>
if (!context->decoder) {<br>
+ if (u_reduce_video_profile(context->templat.profile) ==<br>
+ PIPE_VIDEO_FORMAT_MPEG4_AVC)<br>
+ return VA_STATUS_ERROR_INVALID_CONTEXT;<br>
+<br>
/* VPP */<br>
return VA_STATUS_SUCCESS;<br>
}<br>
diff --git a/src/gallium/state_trackers/va/picture_h264.c b/src/gallium/state_trackers/va/picture_h264.c<br>
index bd6c8a0..e9a8825 100644<br>
--- a/src/gallium/state_trackers/va/picture_h264.c<br>
+++ b/src/gallium/state_trackers/va/picture_h264.c<br>
@@ -26,9 +26,10 @@<br>
*<br>
**************************************************************************/<br>
<br>
+#include "util/u_video.h"<br>
#include "va_private.h"<br>
<br>
-void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)<br>
+VAStatus vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)<br>
{<br>
VAPictureParameterBufferH264 *h264 = buf->data;<br>
<br>
@@ -90,6 +91,32 @@ void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context,<br>
h264->pic_fields.bits.redundant_pic_cnt_present_flag;<br>
/*reference_pic_flag*/<br>
context->desc.h264.frame_num = h264->frame_num;<br>
+<br>
+ if (!context->decoder && context->desc.h264.num_ref_frames > 0)<br>
+ context->templat.max_references = MIN2(context->desc.h264.num_ref_frames, 16);<br>
+<br>
+ /* Create the decoder once max_references is known. */<br>
+ if (!context->decoder) {<br>
+ if (!context->target)<br>
+ return VA_STATUS_ERROR_INVALID_CONTEXT;<br>
+<br>
+ if (context->templat.max_references == 0)<br>
+ return VA_STATUS_ERROR_INVALID_BUFFER;<br>
+<br>
+ context->templat.level = u_get_h264_level(context->templat.width,<br>
+ context->templat.height, &context->templat.max_references);<br>
+<br>
+ context->decoder = drv->pipe->create_video_codec(drv->pipe,<br>
+ &context->templat);<br>
+<br>
+ if (!context->decoder)<br>
+ return VA_STATUS_ERROR_ALLOCATION_FAILED;<br>
+<br>
+ context->decoder->begin_frame(context->decoder, context->target,<br>
+ &context->desc.base);<br>
+ }<br>
+<br>
+ return VA_STATUS_SUCCESS;<br>
}<br>
<br>
void vlVaHandleIQMatrixBufferH264(vlVaContext *context, vlVaBuffer *buf)<br>
diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h<br>
index ff1b9bd..cf9b29d 100644<br>
--- a/src/gallium/state_trackers/va/va_private.h<br>
+++ b/src/gallium/state_trackers/va/va_private.h<br>
@@ -215,7 +215,7 @@ typedef struct {<br>
} vlVaSubpicture;<br>
<br>
typedef struct {<br>
- struct pipe_video_codec *decoder;<br>
+ struct pipe_video_codec templat, *decoder;<br>
struct pipe_video_buffer *target;<br>
union {<br>
struct pipe_picture_desc base;<br>
@@ -353,7 +353,7 @@ VAStatus vlVaHandleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContex<br>
void vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id, struct pipe_video_buffer **ref_frame);<br>
void vlVaHandlePictureParameterBufferMPEG12(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);<br>
void vlVaHandleIQMatrixBufferMPEG12(vlVaContext *context, vlVaBuffer *buf);<br>
-void vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);<br>
+VAStatus vlVaHandlePictureParameterBufferH264(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);<br>
void vlVaHandleIQMatrixBufferH264(vlVaContext *context, vlVaBuffer *buf);<br>
void vlVaHandleSliceParameterBufferH264(vlVaContext *context, vlVaBuffer *buf);<br>
void vlVaHandlePictureParameterBufferVC1(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf);<br>
<span class="HOEnZb"><font color="#888888">--<br>
1.9.1<br>
<br>
</font></span></blockquote></div><br></div>
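
For anyone skimming the thread, here is a minimal self-contained sketch of the deferred decoder creation pattern the patch implements for H.264. It is not the Mesa code: fake_decoder, fake_context, create_video_codec() and handle_h264_pps() below are simplified stand-ins for pipe_video_codec, vlVaContext, drv->pipe->create_video_codec() and vlVaHandlePictureParameterBufferH264(), and the return codes are just placeholders for the VA_STATUS values.

/* Standalone sketch, not the actual state tracker code. */
#include <stdio.h>
#include <stdlib.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

struct fake_decoder {
   unsigned max_references;      /* used to size the reference buffers */
};

struct fake_context {
   struct fake_decoder templat;  /* filled in at context creation time */
   struct fake_decoder *decoder; /* for H.264 created lazily, later    */
};

/* Stand-in for drv->pipe->create_video_codec(). */
static struct fake_decoder *create_video_codec(const struct fake_decoder *templat)
{
   struct fake_decoder *dec = malloc(sizeof(*dec));
   if (dec)
      *dec = *templat;
   return dec;
}

/* Mirrors vlVaHandlePictureParameterBufferH264: the first picture
 * parameter buffer carries num_ref_frames, so only at this point can
 * the decoder be created with an accurate max_references. */
static int handle_h264_pps(struct fake_context *ctx, unsigned num_ref_frames)
{
   if (!ctx->decoder && num_ref_frames > 0)
      ctx->templat.max_references = MIN2(num_ref_frames, 16);

   if (!ctx->decoder) {
      if (ctx->templat.max_references == 0)
         return -1;              /* think VA_STATUS_ERROR_INVALID_BUFFER */
      ctx->decoder = create_video_codec(&ctx->templat);
      if (!ctx->decoder)
         return -1;              /* think VA_STATUS_ERROR_ALLOCATION_FAILED */
   }
   return 0;
}

int main(void)
{
   /* vlVaCreateContext: for H.264 no decoder yet, max_references == 0. */
   struct fake_context ctx = { .templat = { .max_references = 0 } };

   /* vlVaRenderPicture(VAPictureParameterBufferType) creates the decoder. */
   if (handle_h264_pps(&ctx, 4) == 0)
      printf("decoder created with max_references = %u\n",
             ctx.decoder->max_references);

   free(ctx.decoder);
   return 0;
}

The point is only the ordering: vlVaCreateContext no longer knows max_references for H.264, so vlVaBeginPicture and vlVaEndPicture have to tolerate a NULL decoder until the first picture parameter buffer arrives and supplies num_ref_frames, which is what the extra checks in picture.c are for.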