[Mesa-dev] [PATCH 09/12] st/va: add functions for VAAPI encode
Christian König
deathsimple at vodafone.de
Wed Jul 20 08:49:10 UTC 2016
On 20.07.2016 at 06:21, Zhang, Boyuan wrote:
>
> >> - context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
> >> + if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
> >> + context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
>
>
> >Why do we do so here? Could we avoid that?
>
> >I would rather like to keep the begin_frame()/end_frame() handling as it is.
>
> >Christian.
>
>
> This is on purpose. Based on my testing, the application calls
> begin_frame first (via vaBeginPicture), then calls
> PictureParameter/SequenceParameter/... to pass us all the
> picture-related parameters. However, some of those values are required
> by the begin_frame call in radeon_vce, so we have to delay that call
> until we have received all the parameters it needs. The same applies to
> the encode_bitstream call. That's why I delay both calls to end_frame,
> where all the necessary values are available.
>
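For context, this is the order a VA-API client naturally produces. Below is a
minimal sketch of the application side of one encoded frame, using only public
libva calls; the display, context, surface and parameter buffers (enc_ctx,
src_surface, seq_buf, pic_buf, slice_buf, coded_buf_id) are assumed to have
been created beforehand and the names are purely illustrative:

#include <va/va.h>

/* Sketch only: dpy, enc_ctx, src_surface and the VABufferIDs are assumed to
 * come from earlier vaInitialize/vaCreateConfig/vaCreateContext/
 * vaCreateSurfaces/vaCreateBuffer calls. */
static VAStatus encode_one_frame(VADisplay dpy, VAContextID enc_ctx,
                                 VASurfaceID src_surface,
                                 VABufferID seq_buf, VABufferID pic_buf,
                                 VABufferID slice_buf, VABufferID coded_buf_id)
{
   VABufferID params[3] = { seq_buf, pic_buf, slice_buf };
   VACodedBufferSegment *seg;
   VAStatus status;

   /* 1. begin: only the render target is known at this point */
   status = vaBeginPicture(dpy, enc_ctx, src_surface);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* 2. render: SPS/PPS/slice (and misc) parameters reach st/va only now */
   status = vaRenderPicture(dpy, enc_ctx, params, 3);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* 3. end: every parameter is available, so this is where the patch
    *    issues begin_frame/encode_bitstream/end_frame to the driver */
   status = vaEndPicture(dpy, enc_ctx);
   if (status != VA_STATUS_SUCCESS)
      return status;

   /* 4. read the coded data back through the coded buffer */
   vaSyncSurface(dpy, src_surface);
   status = vaMapBuffer(dpy, coded_buf_id, (void **)&seg);
   if (status != VA_STATUS_SUCCESS)
      return status;
   /* seg->buf and seg->size are filled in by the vlVaMapBuffer hunk below */
   vaUnmapBuffer(dpy, coded_buf_id);

   return VA_STATUS_SUCCESS;
}

Until step 2 has run, the state tracker simply has not seen the sequence,
picture and slice parameters yet, which is why the patch defers begin_frame()
and encode_bitstream() to step 3.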
We can keep it like this for now, but I would prefer that we clean this
up and change radeon_vce so that it matches the begin/encode/end calls
from VA-API.
We should probably work on this together with the performance improvements.
Regards,
Christian.
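Purely to illustrate the cleanup suggested above (not something this series
implements): if radeon_vce could cope with encode parameters arriving after
begin_frame, the three VA-API entrypoints could map straight onto the
pipe_video_codec callbacks, roughly like the fragment below, reusing the names
from the existing st/va code:

/* vlVaBeginPicture(): same call for decode and encode */
context->decoder->begin_frame(context->decoder, context->target,
                              &context->desc.base);

/* vlVaRenderPicture(), once the coded buffer is known from the picture
 * parameters: submit the bitstream as the data arrives */
context->decoder->encode_bitstream(context->decoder, context->target,
                                   coded_buf->derived_surface.resource,
                                   &feedback);

/* vlVaEndPicture(): just finish and flush, nothing deferred */
context->decoder->end_frame(context->decoder, context->target,
                            &context->desc.base);
context->decoder->flush(context->decoder);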
>
> Regards,
>
> Boyuan
>
> ------------------------------------------------------------------------
> From: Christian König <deathsimple at vodafone.de>
> Sent: July 19, 2016 4:55:43 AM
> To: Zhang, Boyuan; mesa-dev at lists.freedesktop.org
> Cc: adf.lists at gmail.com
> Subject: Re: [PATCH 09/12] st/va: add functions for VAAPI encode
> On 19.07.2016 at 00:43, Boyuan Zhang wrote:
> > Add necessary functions/changes for VAAPI encoding to buffer and
> > picture. These changes will allow the driver to handle all VAAPI
> > encode related operations. This patch doesn't change the VAAPI decode
> > behaviour.
> >
> > Signed-off-by: Boyuan Zhang <boyuan.zhang at amd.com>
> > ---
> > src/gallium/state_trackers/va/buffer.c | 6 +
> > src/gallium/state_trackers/va/picture.c | 169 ++++++++++++++++++++++++++++-
> > src/gallium/state_trackers/va/va_private.h | 3 +
> > 3 files changed, 176 insertions(+), 2 deletions(-)
> >
> > diff --git a/src/gallium/state_trackers/va/buffer.c b/src/gallium/state_trackers/va/buffer.c
> > index 7d3167b..dfcebbe 100644
> > --- a/src/gallium/state_trackers/va/buffer.c
> > +++ b/src/gallium/state_trackers/va/buffer.c
> > @@ -133,6 +133,12 @@ vlVaMapBuffer(VADriverContextP ctx, VABufferID buf_id, void **pbuff)
> > if (!buf->derived_surface.transfer || !*pbuff)
> > return VA_STATUS_ERROR_INVALID_BUFFER;
> >
> > + if (buf->type == VAEncCodedBufferType) {
> > + ((VACodedBufferSegment*)buf->data)->buf = *pbuff;
> > + ((VACodedBufferSegment*)buf->data)->size = buf->coded_size;
> > + ((VACodedBufferSegment*)buf->data)->next = NULL;
> > + *pbuff = buf->data;
> > + }
> > } else {
> > pipe_mutex_unlock(drv->mutex);
> > *pbuff = buf->data;
> > diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
> > index 89ac024..4793194 100644
> > --- a/src/gallium/state_trackers/va/picture.c
> > +++ b/src/gallium/state_trackers/va/picture.c
> > @@ -78,7 +78,8 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
> > return VA_STATUS_SUCCESS;
> > }
> >
> > - context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
> > + if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
> > + context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
>
> Why do we do so here? Could we avoid that?
>
> I would rather like to keep the begin_frame()/end_frame() handling as
> it is.
>
> Christian.
>
> >
> > return VA_STATUS_SUCCESS;
> > }
> > @@ -278,6 +279,139 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
> > num_buffers, (const void * const*)buffers, sizes);
> > }
> >
> > +static VAStatus
> > +handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
> > +{
> > + VAEncMiscParameterRateControl *rc = (VAEncMiscParameterRateControl *)misc->data;
> > + if (context->desc.h264enc.rate_ctrl.rate_ctrl_method ==
> > + PIPE_H264_ENC_RATE_CONTROL_METHOD_CONSTANT)
> > + context->desc.h264enc.rate_ctrl.target_bitrate = rc->bits_per_second;
> > + else
> > + context->desc.h264enc.rate_ctrl.target_bitrate = rc->bits_per_second * rc->target_percentage;
> > + context->desc.h264enc.rate_ctrl.peak_bitrate = rc->bits_per_second;
> > + if (context->desc.h264enc.rate_ctrl.target_bitrate < 2000000)
> > + context->desc.h264enc.rate_ctrl.vbv_buffer_size = MIN2((context->desc.h264enc.rate_ctrl.target_bitrate * 2.75), 2000000);
> > + else
> > + context->desc.h264enc.rate_ctrl.vbv_buffer_size = context->desc.h264enc.rate_ctrl.target_bitrate;
> > + context->desc.h264enc.rate_ctrl.target_bits_picture =
> > + context->desc.h264enc.rate_ctrl.target_bitrate / context->desc.h264enc.rate_ctrl.frame_rate_num;
> > + context->desc.h264enc.rate_ctrl.peak_bits_picture_integer =
> > + context->desc.h264enc.rate_ctrl.peak_bitrate / context->desc.h264enc.rate_ctrl.frame_rate_num;
> > + context->desc.h264enc.rate_ctrl.peak_bits_picture_fraction = 0;
> > +
> > + return VA_STATUS_SUCCESS;
> > +}
> > +
> > +static VAStatus
> > +handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> > +{
> > + VAEncSequenceParameterBufferH264 *h264 = (VAEncSequenceParameterBufferH264 *)buf->data;
> > + if (!context->decoder) {
> > + context->templat.max_references = h264->max_num_ref_frames;
> > + context->templat.level = h264->level_idc;
> > + context->decoder = drv->pipe->create_video_codec(drv->pipe, &context->templat);
> > + if (!context->decoder)
> > + return VA_STATUS_ERROR_ALLOCATION_FAILED;
> > + }
> > + context->desc.h264enc.gop_size = h264->intra_idr_period;
> > + context->desc.h264enc.rate_ctrl.frame_rate_num = h264->time_scale / 2;
> > + context->desc.h264enc.rate_ctrl.frame_rate_den = 1;
> > + return VA_STATUS_SUCCESS;
> > +}
> > +
> > +static VAStatus
> > +handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
> > +{
> > + VAStatus vaStatus = VA_STATUS_SUCCESS;
> > + VAEncMiscParameterBuffer *misc;
> > + misc = buf->data;
> > +
> > + switch (misc->type) {
> > + case VAEncMiscParameterTypeRateControl:
> > + vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
> > + break;
> > +
> > + default:
> > + break;
> > + }
> > +
> > + return vaStatus;
> > +}
> > +
> > +static VAStatus
> > +handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> > +{
> > + VAEncPictureParameterBufferH264 *h264;
> > + vlVaBuffer *coded_buf;
> > +
> > + h264 = buf->data;
> > + context->desc.h264enc.frame_num = h264->frame_num;
> > + context->desc.h264enc.not_referenced = false;
> > + context->desc.h264enc.is_idr = (h264->pic_fields.bits.idr_pic_flag == 1);
> > + context->desc.h264enc.pic_order_cnt = h264->CurrPic.TopFieldOrderCnt / 2;
> > + if (context->desc.h264enc.is_idr)
> > + context->desc.h264enc.i_remain = 1;
> > + else
> > + context->desc.h264enc.i_remain = 0;
> > +
> > + context->desc.h264enc.p_remain = context->desc.h264enc.gop_size - context->desc.h264enc.gop_cnt - context->desc.h264enc.i_remain;
> > +
> > + coded_buf = handle_table_get(drv->htab, h264->coded_buf);
> > + if (!coded_buf->derived_surface.resource)
> > + coded_buf->derived_surface.resource = pipe_buffer_create(drv->pipe->screen, PIPE_BIND_VERTEX_BUFFER,
> > + PIPE_USAGE_STREAM, coded_buf->size);
> > + context->coded_buf = coded_buf;
> > +
> > + context->desc.h264enc.frame_idx[h264->CurrPic.picture_id] = h264->frame_num;
> > + if (context->desc.h264enc.is_idr)
> > + context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_IDR;
> > + else
> > + context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_P;
> > +
> > + context->desc.h264enc.frame_num_cnt++;
> > + context->desc.h264enc.gop_cnt++;
> > + if (context->desc.h264enc.gop_cnt == context->desc.h264enc.gop_size)
> > + context->desc.h264enc.gop_cnt = 0;
> > +
> > + return VA_STATUS_SUCCESS;
> > +}
> > +
> > +static VAStatus
> > +handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> > +{
> > + VAEncSliceParameterBufferH264 *h264;
> > +
> > + h264 = buf->data;
> > + context->desc.h264enc.ref_idx_l0 = VA_INVALID_ID;
> > + context->desc.h264enc.ref_idx_l1 = VA_INVALID_ID;
> > +
> > + for (int i = 0; i < 32; i++) {
> > + if (h264->RefPicList0[i].picture_id != VA_INVALID_ID) {
> > + if (context->desc.h264enc.ref_idx_l0 == VA_INVALID_ID)
> > + context->desc.h264enc.ref_idx_l0 = context->desc.h264enc.frame_idx[h264->RefPicList0[i].picture_id];
> > + }
> > + if (h264->RefPicList1[i].picture_id != VA_INVALID_ID && h264->slice_type == 1) {
> > + if (context->desc.h264enc.ref_idx_l1 == VA_INVALID_ID)
> > + context->desc.h264enc.ref_idx_l1 = context->desc.h264enc.frame_idx[h264->RefPicList1[i].picture_id];
> > + }
> > + }
> > +
> > + if (h264->slice_type == 1)
> > + context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_B;
> > + else if (h264->slice_type == 0)
> > + context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_P;
> > + else if (h264->slice_type == 2) {
> > + if (context->desc.h264enc.is_idr){
> > + context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_IDR;
> > + context->desc.h264enc.idr_pic_id++;
> > + } else
> > + context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_I;
> > + } else
> > + context->desc.h264enc.picture_type = PIPE_H264_ENC_PICTURE_TYPE_SKIP;
> > +
> > + return VA_STATUS_SUCCESS;
> > +}
> > +
> > VAStatus
> > vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
> > {
> > @@ -328,6 +462,22 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
> > vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
> > break;
> >
> > + case VAEncSequenceParameterBufferType:
> > + vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
> > + break;
> > +
> > + case VAEncMiscParameterBufferType:
> > + vaStatus = handleVAEncMiscParameterBufferType(context, buf);
> > + break;
> > +
> > + case VAEncPictureParameterBufferType:
> > + vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
> > + break;
> > +
> > + case VAEncSliceParameterBufferType:
> > + vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
> > + break;
> > +
> > default:
> > break;
> > }
> > @@ -342,6 +492,9 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
> > {
> > vlVaDriver *drv;
> > vlVaContext *context;
> > + vlVaBuffer *coded_buf;
> > + unsigned int coded_size;
> > + void *feedback;
> >
> > if (!ctx)
> > return VA_STATUS_ERROR_INVALID_CONTEXT;
> > @@ -365,7 +518,19 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
> > }
> >
> > context->mpeg4.frame_num++;
> > - context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
> > +
> > + if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
> > + coded_buf = context->coded_buf;
> > + context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
> > + context->decoder->encode_bitstream(context->decoder, context->target,
> > + coded_buf->derived_surface.resource, &feedback);
> > + context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
> > + context->decoder->flush(context->decoder);
> > + context->decoder->get_feedback(context->decoder, feedback, &coded_size);
> > + coded_buf->coded_size = coded_size;
> > + }
> > + else
> > + context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
> >
> > return VA_STATUS_SUCCESS;
> > }
> > diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h
> > index ad9010a..6d3ac38 100644
> > --- a/src/gallium/state_trackers/va/va_private.h
> > +++ b/src/gallium/state_trackers/va/va_private.h
> > @@ -229,6 +229,7 @@ typedef struct {
> > struct pipe_vc1_picture_desc vc1;
> > struct pipe_h264_picture_desc h264;
> > struct pipe_h265_picture_desc h265;
> > + struct pipe_h264_enc_picture_desc h264enc;
> > } desc;
> >
> > struct {
> > @@ -241,6 +242,7 @@ typedef struct {
> > } mpeg4;
> >
> > struct vl_deint_filter *deint;
> > + struct vlVaBuffer *coded_buf;
> > } vlVaContext;
> >
> > typedef struct {
> > @@ -260,6 +262,7 @@ typedef struct {
> > } derived_surface;
> > unsigned int export_refcount;
> > VABufferInfo export_state;
> > + unsigned int coded_size;
> > } vlVaBuffer;
> >
> > typedef struct {
>