[Mesa-dev] [PATCH v2 7/8] st/va: add colorspace conversion through Video Post Processing
Julien Isorce
j.isorce at samsung.com
Thu Oct 29 09:26:44 PDT 2015
Ack. I confirm it still works without it. So I'll remove it. Thx
-----Original Message-----
From: Christian König [mailto:deathsimple at vodafone.de]
Sent: 29 October 2015 14:02
To: Julien Isorce; mesa-dev at lists.freedesktop.org
Cc: emil.l.velikov at gmail.com
Subject: Re: [Mesa-dev] [PATCH v2 7/8] st/va: add colorspace conversion through Video Post Processing
> + if(src_surface->fence) {
> + screen->fence_finish(screen, src_surface->fence, PIPE_TIMEOUT_INFINITE);
> + screen->fence_reference(screen, &src_surface->fence, NULL);
> + }
That shouldn't be necessary because all render operations to the same surface are pipelined anyway.
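I.e. the wait can simply be dropped; a rough sketch of what the start of
handleVAProcPipelineParameterBufferType would then look like (illustrative
only, not a replacement hunk):

   src_surface = handle_table_get(drv->htab, pipeline_param->surface);
   if (!src_surface || !src_surface->buffer)
      return VA_STATUS_ERROR_INVALID_SURFACE;

   /* No fence_finish/fence_reference needed here: the compositor pass
    * below is submitted on the same pipe context, so it is ordered after
    * whatever produced src_surface->buffer. */
   surfaces = context->target->get_surfaces(context->target);
   if (!surfaces || !surfaces[0])
      return VA_STATUS_ERROR_INVALID_SURFACE;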
Regards,
Christian.
On 22.10.2015 18:37, Julien Isorce wrote:
> Add support for VPP in the following functions:
> vlVaCreateContext
> vlVaDestroyContext
> vlVaBeginPicture
> vlVaRenderPicture
> vlVaEndPicture
>
> Add support for VAProcFilterNone in:
> vlVaQueryVideoProcFilters
> vlVaQueryVideoProcFilterCaps
> vlVaQueryVideoProcPipelineCaps
>
> Add handleVAProcPipelineParameterBufferType helper.
>
> One application is:
> VASurfaceNV12 -> gstvaapipostproc -> VASurfaceRGBA
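>
> On the application side this path is driven with the standard libva
> calls; a rough sketch (config/surface creation and error handling
> omitted, local names purely illustrative):
>
>    VAConfigID cfg;
>    VAContextID vpp_ctx;
>    VABufferID buf;
>
>    /* VPP config/context: no size, no flags, no render target list. */
>    vaCreateConfig(dpy, VAProfileNone, VAEntrypointVideoProc, NULL, 0, &cfg);
>    vaCreateContext(dpy, cfg, 0, 0, 0, NULL, 0, &vpp_ctx);
>
>    VAProcPipelineParameterBuffer p = {0};
>    p.surface = nv12_surface;        /* source, e.g. decoder output */
>    p.surface_region = &src_region;  /* VARectangle on the source */
>    p.output_region = &dst_region;   /* VARectangle on the target */
>
>    vaCreateBuffer(dpy, vpp_ctx, VAProcPipelineParameterBufferType,
>                   sizeof(p), 1, &p, &buf);
>
>    vaBeginPicture(dpy, vpp_ctx, rgba_surface);  /* progressive RGBA/BGRA target */
>    vaRenderPicture(dpy, vpp_ctx, &buf, 1);
>    vaEndPicture(dpy, vpp_ctx);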
>
> Signed-off-by: Julien Isorce <j.isorce at samsung.com>
> ---
> src/gallium/state_trackers/va/context.c | 91 +++++++++++++++++----------
> src/gallium/state_trackers/va/picture.c | 86 +++++++++++++++++++++++++-
> src/gallium/state_trackers/va/surface.c | 98 +++++++++++++++++++++++++++++-
> src/gallium/state_trackers/va/va_private.h | 8 +++
> 4 files changed, 248 insertions(+), 35 deletions(-)
>
> diff --git a/src/gallium/state_trackers/va/context.c b/src/gallium/state_trackers/va/context.c
> index 9be9085..170e9d6 100644
> --- a/src/gallium/state_trackers/va/context.c
> +++ b/src/gallium/state_trackers/va/context.c
> @@ -87,6 +87,14 @@ static struct VADriverVTable vtable =
> &vlVaQuerySurfaceAttributes
> };
>
> +static struct VADriverVTableVPP vtable_vpp = {
> + 1,
> + &vlVaQueryVideoProcFilters,
> + &vlVaQueryVideoProcFilterCaps,
> + &vlVaQueryVideoProcPipelineCaps
> +};
> +
> PUBLIC VAStatus
> VA_DRIVER_INIT_FUNC(VADriverContextP ctx)
> {
> @@ -122,6 +130,7 @@ VA_DRIVER_INIT_FUNC(VADriverContextP ctx)
> ctx->version_major = 0;
> ctx->version_minor = 1;
> *ctx->vtable = vtable;
> + *ctx->vtable_vpp = vtable_vpp;
> ctx->max_profiles = PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH - PIPE_VIDEO_PROFILE_UNKNOWN;
> ctx->max_entrypoints = 1;
> ctx->max_attributes = 1;
> @@ -151,11 +160,15 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> struct pipe_video_codec templat = {};
> vlVaDriver *drv;
> vlVaContext *context;
> + int is_vpp;
>
> if (!ctx)
> return VA_STATUS_ERROR_INVALID_CONTEXT;
>
> - if (!(picture_width && picture_height))
> + is_vpp = config_id == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
> + !picture_height && !flag && !render_targets &&
> + !num_render_targets;
> +
> + if (!(picture_width && picture_height) && !is_vpp)
> return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
>
> drv = VL_VA_DRIVER(ctx);
> @@ -163,38 +176,46 @@ vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
> if (!context)
> return VA_STATUS_ERROR_ALLOCATION_FAILED;
>
> - templat.profile = config_id;
> - templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
> - templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
> - templat.width = picture_width;
> - templat.height = picture_height;
> - templat.max_references = num_render_targets;
> - templat.expect_chunked_decode = true;
> -
> - if (u_reduce_video_profile(templat.profile) ==
> - PIPE_VIDEO_FORMAT_MPEG4_AVC)
> - templat.level = u_get_h264_level(templat.width, templat.height,
> - &templat.max_references);
> -
> - context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
> - if (!context->decoder) {
> - FREE(context);
> - return VA_STATUS_ERROR_ALLOCATION_FAILED;
> - }
> -
> - if (u_reduce_video_profile(context->decoder->profile) ==
> - PIPE_VIDEO_FORMAT_MPEG4_AVC) {
> - context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
> - if (!context->desc.h264.pps) {
> + if (is_vpp) {
> + context->decoder = NULL;
> + if (!drv->compositor.upload) {
> FREE(context);
> - return VA_STATUS_ERROR_ALLOCATION_FAILED;
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> }
> - context->desc.h264.pps->sps = CALLOC_STRUCT(pipe_h264_sps);
> - if (!context->desc.h264.pps->sps) {
> - FREE(context->desc.h264.pps);
> + } else {
> + templat.profile = config_id;
> + templat.entrypoint = PIPE_VIDEO_ENTRYPOINT_BITSTREAM;
> + templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
> + templat.width = picture_width;
> + templat.height = picture_height;
> + templat.max_references = num_render_targets;
> + templat.expect_chunked_decode = true;
> +
> + if (u_reduce_video_profile(templat.profile) ==
> + PIPE_VIDEO_FORMAT_MPEG4_AVC)
> + templat.level = u_get_h264_level(templat.width, templat.height,
> + &templat.max_references);
> +
> + context->decoder = drv->pipe->create_video_codec(drv->pipe, &templat);
> + if (!context->decoder) {
> FREE(context);
> return VA_STATUS_ERROR_ALLOCATION_FAILED;
> }
> +
> + if (u_reduce_video_profile(context->decoder->profile) ==
> + PIPE_VIDEO_FORMAT_MPEG4_AVC) {
> + context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
> + if (!context->desc.h264.pps) {
> + FREE(context);
> + return VA_STATUS_ERROR_ALLOCATION_FAILED;
> + }
> + context->desc.h264.pps->sps = CALLOC_STRUCT(pipe_h264_sps);
> + if (!context->desc.h264.pps->sps) {
> + FREE(context->desc.h264.pps);
> + FREE(context);
> + return VA_STATUS_ERROR_ALLOCATION_FAILED;
> + }
> + }
> }
>
>    context->desc.base.profile = config_id;
> @@ -214,12 +235,16 @@ vlVaDestroyContext(VADriverContextP ctx, VAContextID context_id)
>
> drv = VL_VA_DRIVER(ctx);
> context = handle_table_get(drv->htab, context_id);
> - if (u_reduce_video_profile(context->decoder->profile) ==
> - PIPE_VIDEO_FORMAT_MPEG4_AVC) {
> - FREE(context->desc.h264.pps->sps);
> - FREE(context->desc.h264.pps);
> +
> + if (context->decoder) {
> + if (u_reduce_video_profile(context->decoder->profile) ==
> + PIPE_VIDEO_FORMAT_MPEG4_AVC) {
> + FREE(context->desc.h264.pps->sps);
> + FREE(context->desc.h264.pps);
> + }
> + context->decoder->destroy(context->decoder);
> }
> - context->decoder->destroy(context->decoder);
> +
> FREE(context);
> handle_table_remove(drv->htab, context_id);
>
> diff --git a/src/gallium/state_trackers/va/picture.c b/src/gallium/state_trackers/va/picture.c
> index 9b94b39..eee6269 100644
> --- a/src/gallium/state_trackers/va/picture.c
> +++ b/src/gallium/state_trackers/va/picture.c
> @@ -32,6 +32,7 @@
> #include "util/u_video.h"
>
> #include "vl/vl_vlc.h"
> +#include "vl/vl_winsys.h"
>
> #include "va_private.h"
>
> @@ -58,6 +59,16 @@ vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID rende
> return VA_STATUS_ERROR_INVALID_SURFACE;
>
> context->target = surf->buffer;
> +
> + if (!context->decoder) {
> + /* VPP */
> + if ((context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
> + context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM) ||
> + context->target->interlaced)
> + return VA_STATUS_ERROR_UNIMPLEMENTED;
> + return VA_STATUS_SUCCESS;
> + }
> +
> context->decoder->begin_frame(context->decoder, context->target,
> NULL);
>
> return VA_STATUS_SUCCESS;
> @@ -521,11 +532,76 @@ handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
> num_buffers, (const void * const*)buffers, sizes);
> }
>
> +static VAStatus
> +handleVAProcPipelineParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
> +{
> + struct u_rect src_rect;
> + struct u_rect dst_rect;
> + struct u_rect *dirty_area;
> + vlVaSurface *src_surface;
> + VAProcPipelineParameterBuffer *pipeline_param;
> + struct pipe_surface **surfaces;
> + struct pipe_screen *screen;
> + struct pipe_surface *psurf;
> +
> + if (!drv || !context)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> + if (!buf || !buf->data)
> + return VA_STATUS_ERROR_INVALID_BUFFER;
> +
> + if (!context->target)
> + return VA_STATUS_ERROR_INVALID_SURFACE;
> +
> + pipeline_param = (VAProcPipelineParameterBuffer *)buf->data;
> +
> + src_surface = handle_table_get(drv->htab, pipeline_param->surface);
> + if (!src_surface || !src_surface->buffer)
> + return VA_STATUS_ERROR_INVALID_SURFACE;
> +
> + screen = drv->pipe->screen;
> +
> + if(src_surface->fence) {
> + screen->fence_finish(screen, src_surface->fence, PIPE_TIMEOUT_INFINITE);
> + screen->fence_reference(screen, &src_surface->fence, NULL);
> + }
> +
> + surfaces = context->target->get_surfaces(context->target);
> +
> + if (!surfaces || !surfaces[0])
> + return VA_STATUS_ERROR_INVALID_SURFACE;
> +
> + psurf = surfaces[0];
> +
> + src_rect.x0 = pipeline_param->surface_region->x;
> + src_rect.y0 = pipeline_param->surface_region->y;
> + src_rect.x1 = pipeline_param->surface_region->x + pipeline_param->surface_region->width;
> + src_rect.y1 = pipeline_param->surface_region->y +
> + pipeline_param->surface_region->height;
> +
> + dst_rect.x0 = pipeline_param->output_region->x;
> + dst_rect.y0 = pipeline_param->output_region->y;
> + dst_rect.x1 = pipeline_param->output_region->x + pipeline_param->output_region->width;
> + dst_rect.y1 = pipeline_param->output_region->y +
> + pipeline_param->output_region->height;
> +
> + dirty_area = vl_screen_get_dirty_area(drv->vscreen);
> +
> + vl_compositor_clear_layers(&drv->cstate);
> + vl_compositor_set_buffer_layer(&drv->cstate, &drv->compositor, 0, src_surface->buffer, &src_rect, NULL, VL_COMPOSITOR_WEAVE);
> + vl_compositor_set_layer_dst_area(&drv->cstate, 0, &dst_rect);
> + vl_compositor_render(&drv->cstate, &drv->compositor, psurf,
> + dirty_area, true);
> +
> + screen->fence_reference(screen, &src_surface->fence, NULL);
> + drv->pipe->flush(drv->pipe, &src_surface->fence, 0);
> +
> + return VA_STATUS_SUCCESS;
> +}
> +
> VAStatus
> vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
> {
> vlVaDriver *drv;
> vlVaContext *context;
> + VAStatus vaStatus = VA_STATUS_SUCCESS;
>
> unsigned i;
>
> @@ -561,13 +637,16 @@ vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buff
> case VASliceDataBufferType:
> handleVASliceDataBufferType(context, buf);
> break;
> + case VAProcPipelineParameterBufferType:
> + vaStatus = handleVAProcPipelineParameterBufferType(drv, context, buf);
> + break;
>
> default:
> break;
> }
> }
>
> - return VA_STATUS_SUCCESS;
> + return vaStatus;
> }
>
> VAStatus
> @@ -587,6 +666,11 @@ vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
> if (!context)
> return VA_STATUS_ERROR_INVALID_CONTEXT;
>
> + if (!context->decoder) {
> + /* VPP */
> + return VA_STATUS_SUCCESS;
> + }
> +
> context->mpeg4.frame_num++;
> context->decoder->end_frame(context->decoder, context->target,
> &context->desc.base);
>
> diff --git a/src/gallium/state_trackers/va/surface.c b/src/gallium/state_trackers/va/surface.c
> index aa2b962..23087ab 100644
> --- a/src/gallium/state_trackers/va/surface.c
> +++ b/src/gallium/state_trackers/va/surface.c
> @@ -348,7 +348,8 @@ vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config,
> i = 0;
>
> if (config == PIPE_VIDEO_PROFILE_UNKNOWN) {
> - /* Assume VAEntrypointVideoProc for now. */
> + /* vlVaCreateConfig returns PIPE_VIDEO_PROFILE_UNKNOWN
> + only for VAEntrypointVideoProc. */
> attribs[i].type = VASurfaceAttribPixelFormat;
> attribs[i].value.type = VAGenericValueTypeInteger;
> attribs[i].flags = VA_SURFACE_ATTRIB_GETTABLE |
>                        VA_SURFACE_ATTRIB_SETTABLE;
> @@ -640,3 +641,98 @@ no_res:
>
> return VA_STATUS_ERROR_ALLOCATION_FAILED;
> }
> +
> +VAStatus
> +vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context,
> +                          VAProcFilterType *filters, unsigned int *num_filters)
> +{
> + unsigned int num = 0;
> +
> + if (!ctx)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> + if (!num_filters || !filters)
> + return VA_STATUS_ERROR_INVALID_PARAMETER;
> +
> + filters[num++] = VAProcFilterNone;
> +
> + *num_filters = num;
> +
> + return VA_STATUS_SUCCESS;
> +}
> +
> +VAStatus
> +vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context,
> + VAProcFilterType type, void *filter_caps,
> +                             unsigned int *num_filter_caps)
> +{
> + unsigned int i;
> +
> + if (!ctx)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> + if (!filter_caps || !num_filter_caps)
> + return VA_STATUS_ERROR_INVALID_PARAMETER;
> +
> + i = 0;
> +
> + switch (type) {
> + case VAProcFilterNone:
> + break;
> + case VAProcFilterNoiseReduction:
> + case VAProcFilterDeinterlacing:
> + case VAProcFilterSharpening:
> + case VAProcFilterColorBalance:
> + case VAProcFilterSkinToneEnhancement:
> + return VA_STATUS_ERROR_UNIMPLEMENTED;
> + default:
> + assert(0);
> + }
> +
> + *num_filter_caps = i;
> +
> + return VA_STATUS_SUCCESS;
> +}
> +
> +static VAProcColorStandardType vpp_input_color_standards[VAProcColorStandardCount] = {
> + VAProcColorStandardBT601
> +};
> +
> +static VAProcColorStandardType vpp_output_color_standards[VAProcColorStandardCount] = {
> + VAProcColorStandardBT601
> +};
> +
> +VAStatus
> +vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context,
> + VABufferID *filters, unsigned int num_filters,
> +                               VAProcPipelineCaps *pipeline_cap)
> +{
> + unsigned int i = 0;
> +
> + if (!ctx)
> + return VA_STATUS_ERROR_INVALID_CONTEXT;
> +
> + if (!pipeline_cap)
> + return VA_STATUS_ERROR_INVALID_PARAMETER;
> +
> + if (num_filters && !filters)
> + return VA_STATUS_ERROR_INVALID_PARAMETER;
> +
> + pipeline_cap->pipeline_flags = 0;
> + pipeline_cap->filter_flags = 0;
> + pipeline_cap->num_forward_references = 0;
> + pipeline_cap->num_backward_references = 0;
> + pipeline_cap->num_input_color_standards = 1;
> + pipeline_cap->input_color_standards = vpp_input_color_standards;
> + pipeline_cap->num_output_color_standards = 1;
> + pipeline_cap->output_color_standards = vpp_output_color_standards;
> +
> + for (i = 0; i < num_filters; i++) {
> + vlVaBuffer *buf = handle_table_get(VL_VA_DRIVER(ctx)->htab,
> + filters[i]);
> +
> + if (!buf || buf->type >= VABufferTypeMax)
> + return VA_STATUS_ERROR_INVALID_BUFFER;
> + }
> +
> + return VA_STATUS_SUCCESS;
> +}
> diff --git a/src/gallium/state_trackers/va/va_private.h b/src/gallium/state_trackers/va/va_private.h
> index 770c7dd..d4d74d6 100644
> --- a/src/gallium/state_trackers/va/va_private.h
> +++ b/src/gallium/state_trackers/va/va_private.h
> @@ -33,6 +33,7 @@
>
> #include <va/va.h>
> #include <va/va_backend.h>
> +#include <va/va_backend_vpp.h>
>
> #include "pipe/p_video_enums.h"
> #include "pipe/p_video_codec.h"
> @@ -319,4 +320,11 @@ VAStatus vlVaCreateSurfaces2(VADriverContextP ctx, unsigned int format, unsigned
> unsigned int num_attribs);
> VAStatus vlVaQuerySurfaceAttributes(VADriverContextP ctx, VAConfigID config, VASurfaceAttrib *attrib_list,
> unsigned int *num_attribs);
> +
> +VAStatus vlVaQueryVideoProcFilters(VADriverContextP ctx, VAContextID context, VAProcFilterType *filters,
> + unsigned int *num_filters);
> +VAStatus vlVaQueryVideoProcFilterCaps(VADriverContextP ctx, VAContextID context, VAProcFilterType type,
> +                                      void *filter_caps, unsigned int *num_filter_caps);
> +VAStatus vlVaQueryVideoProcPipelineCaps(VADriverContextP ctx, VAContextID context, VABufferID *filters,
> +                                        unsigned int num_filters, VAProcPipelineCaps *pipeline_cap);
> #endif //VA_PRIVATE_H