[RFC] compositor-drm: Add hardware accelerated capture of screen using libva

Kristian Høgsberg hoegsberg at gmail.com
Thu Aug 29 21:37:07 PDT 2013


On Fri, Aug 23, 2013 at 05:15:48PM +0300, Ander Conselvan de Oliveira wrote:
> From: Ander Conselvan de Oliveira <ander.conselvan.de.oliveira at intel.com>
> 
> This patch adds a feature to the DRM backend that uses libva for
> encoding the screen contents in H.264. Screen recording can be
> activated by pressing mod-shift-space q. A file named capture.h264
> will be created in the current directory, which can be muxed into
> an MP4 file with gstreamer using

I tried it out and it worked like a charm.  I have IvyBridge here and
I didn't get the hang you mention.  It's very cool how it encodes H.264
directly and doesn't seem to load the system very much when doing so.
Newer Intel GPUs have a dedicated hw video encoder so it shouldn't
take away CPU or GPU resources from whatever weston and clients are
doing.  They're still competing for memory bandwidth of course.  I
wonder what the impact of capturing a GPU-intensive app (like a modern
game, potentially under xwayland) might be.

Anyway, I feel like we're better off merging this sooner rather than
later.  The feature is basically working and for all we know the
crasher could be SandyBridge-specific.  Do you see a problem in
committing this now and fixing the rest of the issues incrementally?

Kristian

> gst-launch filesrc location=capture.h264 ! h264parse ! mp4mux ! \
>            filesink location=file.mp4
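
(For reference, a recent ffmpeg build with H.264 support should be able
to mux the raw stream as well, e.g.

   ffmpeg -f h264 -i capture.h264 -c:v copy capture.mp4

though that is untested with this particular output, so the gst-launch
pipeline above is the reference.)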
> 
> This is limited to the DRM compositor in order to avoid a copy when
> submitting the front buffer to libva. The code in vaapi-recorder.c
> takes a dma_buf fd referencing it, does a colorspace conversion using
> the video post processing pipeline and then uses that as input to the
> encoder.
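
(In terms of API calls, the per-frame path described above boils down
to roughly the following; this is only a sketch, and the names are
taken from the code further down in the patch:

   /* once per repainted frame, in the DRM backend */
   drmPrimeHandleToFD(drm_fd, front_buffer_handle, DRM_CLOEXEC, &prime_fd);
   vaapi_recorder_frame(recorder, prime_fd, stride); /* import, VPP csc, encode */
   close(prime_fd);

see recorder_frame_notify() below for the real thing.)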
> 
> I'm sending this now so I get comments, but this is not ready for
> prime time yet. I have a somewhat consistent GPU hang when using
> i915 with SandyBridge. Sometimes a page flip never completes. If you
> want to try this anyway and your system gets stuck, you might need to
> run the following:
> 
>   # echo 1 > /sys/kernel/debug/dri/0/i915_wedged
> 
> After that, alt-sysrq [rv] should work.
> 
> Once that's fixed, it would also be good to make the parameters used by
> the encoder more flexible. For now the QP parameter is hardcoded to 0
> and we have only I and P frames (no B frames), which causes the
> resulting files to be very large.
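
(A minimal sketch of what making the QP configurable could look like,
not part of the patch and untested; encoder_set_qp() is a made-up
helper:

   static void
   encoder_set_qp(struct vaapi_recorder *r, int qp)
   {
           /* with VA_RC_CQP this acts as the constant quantizer for the
            * whole stream; larger values trade quality for smaller files */
           r->encoder.param.pic.pic_init_qp = qp; /* e.g. 26 instead of 0 */
   }

plus a knob somewhere to choose the value and the intra period.)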
> ---
>  configure.ac         |    6 +
>  src/Makefile.am      |    6 +
>  src/compositor-drm.c |  109 ++++++
>  src/vaapi-recorder.c | 1062 ++++++++++++++++++++++++++++++++++++++++++++++++++
>  src/vaapi-recorder.h |   35 ++
>  5 files changed, 1218 insertions(+)
>  create mode 100644 src/vaapi-recorder.c
>  create mode 100644 src/vaapi-recorder.h
> 
> diff --git a/configure.ac b/configure.ac
> index fab0b48..e5f6afd 100644
> --- a/configure.ac
> +++ b/configure.ac
> @@ -239,6 +239,11 @@ PKG_CHECK_MODULES(WEBP, [libwebp], [have_webp=yes], [have_webp=no])
>  AS_IF([test "x$have_webp" = "xyes"],
>        [AC_DEFINE([HAVE_WEBP], [1], [Have webp])])
>  
> +PKG_CHECK_MODULES(LIBVA, [libva >= 0.34.0 libva-drm >= 0.34.0], [have_libva=yes], [have_libva=no])
> +AS_IF([test "x$have_libva" = "xyes"],
> +      [AC_DEFINE([HAVE_LIBVA], [1], [Have libva])])
> +AM_CONDITIONAL(ENABLE_LIBVA, test "x$have_libva" = "xyes")
> +
>  AC_CHECK_LIB([jpeg], [jpeg_CreateDecompress], have_jpeglib=yes)
>  if test x$have_jpeglib = xyes; then
>    JPEG_LIBS="-ljpeg"
> @@ -478,4 +483,5 @@ AC_MSG_RESULT([
>  	LCMS2 Support			${have_lcms}
>  	libwebp Support			${have_webp}
>  	libunwind Support		${have_libunwind}
> +	VA H.264 encoding Support	${have_libva}
>  ])
> diff --git a/src/Makefile.am b/src/Makefile.am
> index 929de31..ab69df2 100644
> --- a/src/Makefile.am
> +++ b/src/Makefile.am
> @@ -152,6 +152,12 @@ drm_backend_la_SOURCES =			\
>  	launcher-util.h				\
>  	libbacklight.c				\
>  	libbacklight.h
> +
> +if ENABLE_LIBVA
> +drm_backend_la_SOURCES += vaapi-recorder.c
> +drm_backend_la_LIBADD += $(LIBVA_LIBS)
> +drm_backend_la_CFLAGS += $(LIBVA_CFLAGS)
> +endif
>  endif
>  
>  if ENABLE_WAYLAND_COMPOSITOR
> diff --git a/src/compositor-drm.c b/src/compositor-drm.c
> index b9e3fc9..dca1e6c 100644
> --- a/src/compositor-drm.c
> +++ b/src/compositor-drm.c
> @@ -47,6 +47,7 @@
>  #include "pixman-renderer.h"
>  #include "udev-seat.h"
>  #include "launcher-util.h"
> +#include "vaapi-recorder.h"
>  
>  #ifndef DRM_CAP_TIMESTAMP_MONOTONIC
>  #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
> @@ -75,6 +76,7 @@ struct drm_compositor {
>  	struct {
>  		int id;
>  		int fd;
> +		char *filename;
>  	} drm;
>  	struct gbm_device *gbm;
>  	uint32_t *crtcs;
> @@ -159,6 +161,9 @@ struct drm_output {
>  	pixman_image_t *image[2];
>  	int current_image;
>  	pixman_region32_t previous_damage;
> +
> +	struct vaapi_recorder *recorder;
> +	struct wl_listener recorder_frame_listener;
>  };
>  
>  /*
> @@ -716,6 +721,11 @@ page_flip_handler(int fd, unsigned int frame,
>  	if (!output->vblank_pending) {
>  		msecs = sec * 1000 + usec / 1000;
>  		weston_output_finish_frame(&output->base, msecs);
> +
> +		/* We can't call this from frame_notify, because the output's
> +		 * repaint needed flag is cleared just after that */
> +		if (output->recorder)
> +			weston_output_schedule_repaint(&output->base);
>  	}
>  }
>  
> @@ -1214,6 +1224,7 @@ init_drm(struct drm_compositor *ec, struct udev_device *device)
>  	weston_log("using %s\n", filename);
>  
>  	ec->drm.fd = fd;
> +	ec->drm.filename = strdup(filename);
>  
>  	ret = drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap);
>  	if (ret == 0 && cap == 1)
> @@ -2434,6 +2445,102 @@ planes_binding(struct weston_seat *seat, uint32_t time, uint32_t key, void *data
>  	}
>  }
>  
> +#ifdef HAVE_LIBVA
> +static void
> +recorder_frame_notify(struct wl_listener *listener, void *data)
> +{
> +	struct drm_output *output;
> +	struct drm_compositor *c;
> +	int fd, ret;
> +
> +	output = container_of(listener, struct drm_output,
> +			      recorder_frame_listener);
> +	c = (struct drm_compositor *) output->base.compositor;
> +
> +	if (!output->recorder)
> +		return;
> +
> +	ret = drmPrimeHandleToFD(c->drm.fd, output->current->handle,
> +				 DRM_CLOEXEC, &fd);
> +	if (ret) {
> +		weston_log("[libva recorder] "
> +			   "failed to create prime fd for front buffer\n");
> +		return;
> +	}
> +
> +	vaapi_recorder_frame(output->recorder, fd, output->current->stride / 4);
> +
> +	close(fd);
> +}
> +
> +static void *
> +create_recorder(struct drm_compositor *c, int width, int height,
> +		const char *filename)
> +{
> +	int fd;
> +	drm_magic_t magic;
> +
> +	fd = open(c->drm.filename, O_RDWR | O_CLOEXEC);
> +	if (fd < 0)
> +		return NULL;
> +
> +	drmGetMagic(fd, &magic);
> +	drmAuthMagic(c->drm.fd, magic);
> +
> +	return vaapi_recorder_create(fd, width, height, filename);
> +}
> +
> +static void
> +recorder_binding(struct weston_seat *seat, uint32_t time, uint32_t key,
> +		 void *data)
> +{
> +	struct drm_compositor *c = data;
> +	struct drm_output *output;
> +	int width, height;
> +
> +	output = container_of(c->base.output_list.next,
> +			      struct drm_output, base.link);
> +
> +	if (!output->recorder) {
> +		width = output->base.current->width;
> +		height = output->base.current->height;
> +
> +		output->recorder =
> +			create_recorder(c, width, height, "capture.h264");
> +		if (!output->recorder) {
> +			weston_log("failed to create vaapi recorder\n");
> +			return;
> +		}
> +
> +		output->base.disable_planes++;
> +
> +		output->recorder_frame_listener.notify = recorder_frame_notify;
> +		wl_signal_add(&output->base.frame_signal,
> +			      &output->recorder_frame_listener);
> +
> +		weston_output_schedule_repaint(&output->base);
> +
> +		weston_log("[libva recorder] initialized\n");
> +	} else {
> +		vaapi_recorder_destroy(output->recorder);
> +		/* FIXME: close drm fd passed to recorder */
> +		output->recorder = NULL;
> +
> +		output->base.disable_planes--;
> +
> +		wl_list_remove(&output->recorder_frame_listener.link);
> +		weston_log("[libva recorder] done\n");
> +	}
> +}
> +#else
> +static void
> +recorder_binding(struct weston_seat *seat, uint32_t time, uint32_t key,
> +		 void *data)
> +{
> +	weston_log("Compiled without libva support\n");
> +}
> +#endif
> +
>  static struct weston_compositor *
>  drm_compositor_create(struct wl_display *display,
>  		      int connector, const char *seat_id, int tty, int pixman,
> @@ -2567,6 +2674,8 @@ drm_compositor_create(struct wl_display *display,
>  					    planes_binding, ec);
>  	weston_compositor_add_debug_binding(&ec->base, KEY_V,
>  					    planes_binding, ec);
> +	weston_compositor_add_debug_binding(&ec->base, KEY_Q,
> +					    recorder_binding, ec);
>  
>  	return &ec->base;
>  
> diff --git a/src/vaapi-recorder.c b/src/vaapi-recorder.c
> new file mode 100644
> index 0000000..c0210f0
> --- /dev/null
> +++ b/src/vaapi-recorder.c
> @@ -0,0 +1,1062 @@
> +/*
> + * Copyright © 2013 Intel Corporation
> + *
> + * Permission to use, copy, modify, distribute, and sell this software and
> + * its documentation for any purpose is hereby granted without fee, provided
> + * that the above copyright notice appear in all copies and that both that
> + * copyright notice and this permission notice appear in supporting
> + * documentation, and that the name of the copyright holders not be used in
> + * advertising or publicity pertaining to distribution of the software
> + * without specific, written prior permission.  The copyright holders make
> + * no representations about the suitability of this software for any
> + * purpose.  It is provided "as is" without express or implied warranty.
> + *
> + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
> + * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
> + * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
> + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
> + * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
> + * CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
> + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
> + */
> +
> +/* Copyright (c) 2012 Intel Corporation. All Rights Reserved.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the
> + * "Software"), to deal in the Software without restriction, including
> + * without limitation the rights to use, copy, modify, merge, publish,
> + * distribute, sub license, and/or sell copies of the Software, and to
> + * permit persons to whom the Software is furnished to do so, subject to
> + * the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the
> + * next paragraph) shall be included in all copies or substantial portions
> + * of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
> + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
> + * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
> + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
> + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
> + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
> + */
> +
> +#include <stdlib.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +
> +#include <sys/types.h>
> +#include <sys/stat.h>
> +#include <fcntl.h>
> +
> +
> +#include <va/va.h>
> +#include <va/va_drm.h>
> +#include <va/va_drmcommon.h>
> +#include <va/va_enc_h264.h>
> +#include <va/va_vpp.h>
> +
> +#include "compositor.h"
> +#include "vaapi-recorder.h"
> +
> +#define NAL_REF_IDC_NONE        0
> +#define NAL_REF_IDC_LOW         1
> +#define NAL_REF_IDC_MEDIUM      2
> +#define NAL_REF_IDC_HIGH        3
> +
> +#define NAL_NON_IDR             1
> +#define NAL_IDR                 5
> +#define NAL_SPS                 7
> +#define NAL_PPS                 8
> +#define NAL_SEI                 6
> +
> +#define SLICE_TYPE_P            0
> +#define SLICE_TYPE_B            1
> +#define SLICE_TYPE_I            2
> +
> +#define ENTROPY_MODE_CAVLC      0
> +#define ENTROPY_MODE_CABAC      1
> +
> +#define PROFILE_IDC_BASELINE    66
> +#define PROFILE_IDC_MAIN        77
> +#define PROFILE_IDC_HIGH        100
> +
> +struct vaapi_recorder {
> +	int output_fd;
> +	int width, height;
> +	int frame_count;
> +
> +	VADisplay va_dpy;
> +
> +	/* video post processing is used for colorspace conversion */
> +	struct {
> +		VAConfigID cfg;
> +		VAContextID ctx;
> +		VABufferID pipeline_buf;
> +		VASurfaceID output;
> +	} vpp;
> +
> +	struct {
> +		VAConfigID cfg;
> +		VAContextID ctx;
> +		VASurfaceID reference_picture[3];
> +
> +		int intra_period;
> +		int output_size;
> +		int constraint_set_flag;
> +
> +		struct {
> +			VAEncSequenceParameterBufferH264 seq;
> +			VAEncPictureParameterBufferH264 pic;
> +			VAEncSliceParameterBufferH264 slice;
> +		} param;
> +	} encoder;
> +};
> +
> +/* bitstream code used for writing the packed headers */
> +
> +#define BITSTREAM_ALLOCATE_STEPPING	 4096
> +
> +struct bitstream {
> +	unsigned int *buffer;
> +	int bit_offset;
> +	int max_size_in_dword;
> +};
> +
> +static unsigned int
> +va_swap32(unsigned int val)
> +{
> +	unsigned char *pval = (unsigned char *)&val;
> +
> +	return ((pval[0] << 24) |
> +		(pval[1] << 16) |
> +		(pval[2] << 8)  |
> +		(pval[3] << 0));
> +}
> +
> +static void
> +bitstream_start(struct bitstream *bs)
> +{
> +	bs->max_size_in_dword = BITSTREAM_ALLOCATE_STEPPING;
> +	bs->buffer = calloc(bs->max_size_in_dword * sizeof(int), 1);
> +	bs->bit_offset = 0;
> +}
> +
> +static void
> +bitstream_end(struct bitstream *bs)
> +{
> +	int pos = (bs->bit_offset >> 5);
> +	int bit_offset = (bs->bit_offset & 0x1f);
> +	int bit_left = 32 - bit_offset;
> +
> +	if (bit_offset) {
> +		bs->buffer[pos] = va_swap32((bs->buffer[pos] << bit_left));
> +	}
> +}
> +
> +static void
> +bitstream_put_ui(struct bitstream *bs, unsigned int val, int size_in_bits)
> +{
> +	int pos = (bs->bit_offset >> 5);
> +	int bit_offset = (bs->bit_offset & 0x1f);
> +	int bit_left = 32 - bit_offset;
> +
> +	if (!size_in_bits)
> +		return;
> +
> +	bs->bit_offset += size_in_bits;
> +
> +	if (bit_left > size_in_bits) {
> +		bs->buffer[pos] = (bs->buffer[pos] << size_in_bits | val);
> +		return;
> +	}
> +
> +	size_in_bits -= bit_left;
> +	bs->buffer[pos] =
> +		(bs->buffer[pos] << bit_left) | (val >> size_in_bits);
> +	bs->buffer[pos] = va_swap32(bs->buffer[pos]);
> +
> +	if (pos + 1 == bs->max_size_in_dword) {
> +		bs->max_size_in_dword += BITSTREAM_ALLOCATE_STEPPING;
> +		bs->buffer =
> +			realloc(bs->buffer,
> +				bs->max_size_in_dword * sizeof(unsigned int));
> +	}
> +
> +	bs->buffer[pos + 1] = val;
> +}
> +
> +static void
> +bitstream_put_ue(struct bitstream *bs, unsigned int val)
> +{
> +	int size_in_bits = 0;
> +	int tmp_val = ++val;
> +
> +	while (tmp_val) {
> +		tmp_val >>= 1;
> +		size_in_bits++;
> +	}
> +
> +	bitstream_put_ui(bs, 0, size_in_bits - 1); // leading zero
> +	bitstream_put_ui(bs, val, size_in_bits);
> +}
> +
> +static void
> +bitstream_put_se(struct bitstream *bs, int val)
> +{
> +	unsigned int new_val;
> +
> +	if (val <= 0)
> +		new_val = -2 * val;
> +	else
> +		new_val = 2 * val - 1;
> +
> +	bitstream_put_ue(bs, new_val);
> +}
> +
> +static void
> +bitstream_byte_aligning(struct bitstream *bs, int bit)
> +{
> +	int bit_offset = (bs->bit_offset & 0x7);
> +	int bit_left = 8 - bit_offset;
> +	int new_val;
> +
> +	if (!bit_offset)
> +		return;
> +
> +	if (bit)
> +		new_val = (1 << bit_left) - 1;
> +	else
> +		new_val = 0;
> +
> +	bitstream_put_ui(bs, new_val, bit_left);
> +}
> +
> +static VAStatus
> +encoder_create_config(struct vaapi_recorder *r)
> +{
> +	VAConfigAttrib attrib[2];
> +	VAStatus status;
> +
> +	/* FIXME: should check if VAEntrypointEncSlice is supported */
> +
> +	/* FIXME: should check if specified attributes are supported */
> +
> +	attrib[0].type = VAConfigAttribRTFormat;
> +	attrib[0].value = VA_RT_FORMAT_YUV420;
> +
> +	attrib[1].type = VAConfigAttribRateControl;
> +	attrib[1].value = VA_RC_CQP;
> +
> +	status = vaCreateConfig(r->va_dpy, VAProfileH264Main,
> +				VAEntrypointEncSlice, attrib, 2,
> +				&r->encoder.cfg);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	status = vaCreateContext(r->va_dpy, r->encoder.cfg,
> +				 r->width, r->height, VA_PROGRESSIVE, 0, 0,
> +				 &r->encoder.ctx);
> +	if (status != VA_STATUS_SUCCESS) {
> +		vaDestroyConfig(r->va_dpy, r->encoder.cfg);
> +		return status;
> +	}
> +
> +	return VA_STATUS_SUCCESS;
> +}
> +
> +static void
> +encoder_destroy_config(struct vaapi_recorder *r)
> +{
> +	vaDestroyContext(r->va_dpy, r->encoder.ctx);
> +	vaDestroyConfig(r->va_dpy, r->encoder.cfg);
> +}
> +
> +static void
> +encoder_init_seq_parameters(struct vaapi_recorder *r)
> +{
> +	int width_in_mbs, height_in_mbs;
> +	int frame_cropping_flag = 0;
> +	int frame_crop_bottom_offset = 0;
> +
> +	width_in_mbs = (r->width + 15) / 16;
> +	height_in_mbs = (r->height + 15) / 16;
> +
> +	r->encoder.param.seq.level_idc = 41;
> +	r->encoder.param.seq.intra_period = r->encoder.intra_period;
> +	r->encoder.param.seq.max_num_ref_frames = 4;
> +	r->encoder.param.seq.picture_width_in_mbs = width_in_mbs;
> +	r->encoder.param.seq.picture_height_in_mbs = height_in_mbs;
> +	r->encoder.param.seq.seq_fields.bits.frame_mbs_only_flag = 1;
> +
> +	/* Tc = num_units_in_tick / time_scale */
> +	r->encoder.param.seq.time_scale = 1800;
> +	r->encoder.param.seq.num_units_in_tick = 15;
> +
> +	if (height_in_mbs * 16 - r->height > 0) {
> +		frame_cropping_flag = 1;
> +		frame_crop_bottom_offset = (height_in_mbs * 16 - r->height) / 2;
> +	}
> +
> +	r->encoder.param.seq.frame_cropping_flag = frame_cropping_flag;
> +	r->encoder.param.seq.frame_crop_bottom_offset = frame_crop_bottom_offset;
> +
> +	r->encoder.param.seq.seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = 2;
> +}
> +
> +static VABufferID
> +encoder_update_seq_parameters(struct vaapi_recorder *r)
> +{
> +	VABufferID seq_buf;
> +	VAStatus status;
> +
> +	status = vaCreateBuffer(r->va_dpy, r->encoder.ctx,
> +				VAEncSequenceParameterBufferType,
> +				sizeof(r->encoder.param.seq),
> +				1, &r->encoder.param.seq,
> +				&seq_buf);
> +
> +	if (status == VA_STATUS_SUCCESS)
> +		return seq_buf;
> +	else
> +		return VA_INVALID_ID;
> +}
> +
> +static void
> +encoder_init_pic_parameters(struct vaapi_recorder *r)
> +{
> +	VAEncPictureParameterBufferH264 *pic = &r->encoder.param.pic;
> +
> +	pic->pic_init_qp = 0;
> +
> +	/* ENTROPY_MODE_CABAC */
> +	pic->pic_fields.bits.entropy_coding_mode_flag = 1;
> +
> +	pic->pic_fields.bits.deblocking_filter_control_present_flag = 1;
> +}
> +
> +static VABufferID
> +encoder_update_pic_parameters(struct vaapi_recorder *r,
> +			      VABufferID output_buf)
> +{
> +	VAEncPictureParameterBufferH264 *pic = &r->encoder.param.pic;
> +	VAStatus status;
> +	VABufferID pic_param_buf;
> +	VASurfaceID curr_pic, pic0;
> +
> +	curr_pic = r->encoder.reference_picture[r->frame_count % 2];
> +	pic0 = r->encoder.reference_picture[(r->frame_count + 1) % 2];
> +
> +	pic->CurrPic.picture_id = curr_pic;
> +	pic->CurrPic.TopFieldOrderCnt = r->frame_count * 2;
> +	pic->ReferenceFrames[0].picture_id = pic0;
> +	pic->ReferenceFrames[1].picture_id = r->encoder.reference_picture[2];
> +	pic->ReferenceFrames[2].picture_id = VA_INVALID_ID;
> +
> +	pic->coded_buf = output_buf;
> +	pic->frame_num = r->frame_count;
> +
> +	pic->pic_fields.bits.idr_pic_flag = (r->frame_count == 0);
> +	pic->pic_fields.bits.reference_pic_flag = 1;
> +
> +	status = vaCreateBuffer(r->va_dpy, r->encoder.ctx,
> +				VAEncPictureParameterBufferType,
> +				sizeof(VAEncPictureParameterBufferH264), 1,
> +				pic, &pic_param_buf);
> +
> +	if (status == VA_STATUS_SUCCESS)
> +		return pic_param_buf;
> +	else
> +		return VA_INVALID_ID;
> +}
> +
> +static VABufferID
> +encoder_update_slice_parameter(struct vaapi_recorder *r, int slice_type)
> +{
> +	VABufferID slice_param_buf;
> +	VAStatus status;
> +
> +	int width_in_mbs = (r->width + 15) / 16;
> +	int height_in_mbs = (r->height + 15) / 16;
> +
> +	memset(&r->encoder.param.slice, 0, sizeof r->encoder.param.slice);
> +
> +	r->encoder.param.slice.num_macroblocks = width_in_mbs * height_in_mbs;
> +	r->encoder.param.slice.slice_type = slice_type;
> +
> +	r->encoder.param.slice.slice_alpha_c0_offset_div2 = 2;
> +	r->encoder.param.slice.slice_beta_offset_div2 = 2;
> +
> +	status = vaCreateBuffer(r->va_dpy, r->encoder.ctx,
> +				VAEncSliceParameterBufferType,
> +				sizeof(r->encoder.param.slice), 1,
> +				&r->encoder.param.slice,
> +				&slice_param_buf);
> +
> +	if (status == VA_STATUS_SUCCESS)
> +		return slice_param_buf;
> +	else
> +		return VA_INVALID_ID;
> +}
> +
> +static VABufferID
> +encoder_update_misc_hdr_parameter(struct vaapi_recorder *r)
> +{
> +	VAEncMiscParameterBuffer *misc_param;
> +	VAEncMiscParameterHRD *hrd;
> +	VABufferID buffer;
> +	VAStatus status;
> +
> +	int total_size =
> +		sizeof(VAEncMiscParameterBuffer) +
> +		sizeof(VAEncMiscParameterRateControl);
> +
> +	status = vaCreateBuffer(r->va_dpy, r->encoder.ctx,
> +				VAEncMiscParameterBufferType, total_size,
> +				1, NULL, &buffer);
> +	if (status != VA_STATUS_SUCCESS)
> +		return VA_INVALID_ID;
> +
> +	status = vaMapBuffer(r->va_dpy, buffer, (void **) &misc_param);
> +	if (status != VA_STATUS_SUCCESS) {
> +		vaDestroyBuffer(r->va_dpy, buffer);
> +		return VA_INVALID_ID;
> +	}
> +
> +	misc_param->type = VAEncMiscParameterTypeHRD;
> +	hrd = (VAEncMiscParameterHRD *) misc_param->data;
> +
> +	hrd->initial_buffer_fullness = 0;
> +	hrd->buffer_size = 0;
> +
> +	vaUnmapBuffer(r->va_dpy, buffer);
> +
> +	return buffer;
> +}
> +
> +static int
> +setup_encoder(struct vaapi_recorder *r)
> +{
> +	VAStatus status;
> +
> +	status = encoder_create_config(r);
> +	if (status != VA_STATUS_SUCCESS) {
> +		return -1;
> +	}
> +
> +	status = vaCreateSurfaces(r->va_dpy, VA_RT_FORMAT_YUV420,
> +				  r->width, r->height,
> +				  r->encoder.reference_picture, 3,
> +				  NULL, 0);
> +	if (status != VA_STATUS_SUCCESS) {
> +		encoder_destroy_config(r);
> +		return -1;
> +	}
> +
> +	/* VAProfileH264Main */
> +	r->encoder.constraint_set_flag |= (1 << 1); /* Annex A.2.2 */
> +
> +	r->encoder.output_size = r->width * r->height;
> +
> +	r->encoder.intra_period = 30;
> +
> +	encoder_init_seq_parameters(r);
> +	encoder_init_pic_parameters(r);
> +
> +	return 0;
> +}
> +
> +static void
> +encoder_destroy(struct vaapi_recorder *r)
> +{
> +	vaDestroySurfaces(r->va_dpy, r->encoder.reference_picture, 3);
> +
> +	encoder_destroy_config(r);
> +}
> +
> +static void
> +nal_start_code_prefix(struct bitstream *bs)
> +{
> +	bitstream_put_ui(bs, 0x00000001, 32);
> +}
> +
> +static void
> +nal_header(struct bitstream *bs, int nal_ref_idc, int nal_unit_type)
> +{
> +	/* forbidden_zero_bit: 0 */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	bitstream_put_ui(bs, nal_ref_idc, 2);
> +	bitstream_put_ui(bs, nal_unit_type, 5);
> +}
> +
> +static void
> +rbsp_trailing_bits(struct bitstream *bs)
> +{
> +	bitstream_put_ui(bs, 1, 1);
> +	bitstream_byte_aligning(bs, 0);
> +}
> +
> +static void sps_rbsp(struct bitstream *bs,
> +		     VAEncSequenceParameterBufferH264 *seq,
> +		     int constraint_set_flag)
> +{
> +	int i;
> +
> +	bitstream_put_ui(bs, PROFILE_IDC_MAIN, 8);
> +
> +	/* constraint_set[0-3] flag */
> +	for (i = 0; i < 4; i++) {
> +		int set = (constraint_set_flag & (1 << i)) ? 1 : 0;
> +		bitstream_put_ui(bs, set, 1);
> +	}
> +
> +	/* reserved_zero_4bits */
> +	bitstream_put_ui(bs, 0, 4);
> +	bitstream_put_ui(bs, seq->level_idc, 8);
> +	bitstream_put_ue(bs, seq->seq_parameter_set_id);
> +
> +	bitstream_put_ue(bs, seq->seq_fields.bits.log2_max_frame_num_minus4);
> +	bitstream_put_ue(bs, seq->seq_fields.bits.pic_order_cnt_type);
> +	bitstream_put_ue(bs,
> +			 seq->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4);
> +
> +	bitstream_put_ue(bs, seq->max_num_ref_frames);
> +
> +	/* gaps_in_frame_num_value_allowed_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	/* pic_width_in_mbs_minus1, pic_height_in_map_units_minus1 */
> +	bitstream_put_ue(bs, seq->picture_width_in_mbs - 1);
> +	bitstream_put_ue(bs, seq->picture_height_in_mbs - 1);
> +
> +	bitstream_put_ui(bs, seq->seq_fields.bits.frame_mbs_only_flag, 1);
> +	bitstream_put_ui(bs, seq->seq_fields.bits.direct_8x8_inference_flag, 1);
> +
> +	bitstream_put_ui(bs, seq->frame_cropping_flag, 1);
> +
> +	if (seq->frame_cropping_flag) {
> +		bitstream_put_ue(bs, seq->frame_crop_left_offset);
> +		bitstream_put_ue(bs, seq->frame_crop_right_offset);
> +		bitstream_put_ue(bs, seq->frame_crop_top_offset);
> +		bitstream_put_ue(bs, seq->frame_crop_bottom_offset);
> +	}
> +
> +	/* vui_parameters_present_flag */
> +	bitstream_put_ui(bs, 1, 1);
> +
> +	/* aspect_ratio_info_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +	/* overscan_info_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	/* video_signal_type_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +	/* chroma_loc_info_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	/* timing_info_present_flag */
> +	bitstream_put_ui(bs, 1, 1);
> +	bitstream_put_ui(bs, seq->num_units_in_tick, 32);
> +	bitstream_put_ui(bs, seq->time_scale, 32);
> +	/* fixed_frame_rate_flag */
> +	bitstream_put_ui(bs, 1, 1);
> +
> +	/* nal_hrd_parameters_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	/* vcl_hrd_parameters_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	/* low_delay_hrd_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	/* pic_struct_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +	/* bitstream_restriction_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	rbsp_trailing_bits(bs);
> +}
> +
> +static void pps_rbsp(struct bitstream *bs,
> +		     VAEncPictureParameterBufferH264 *pic)
> +{
> +	/* pic_parameter_set_id, seq_parameter_set_id */
> +	bitstream_put_ue(bs, pic->pic_parameter_set_id);
> +	bitstream_put_ue(bs, pic->seq_parameter_set_id);
> +
> +	bitstream_put_ui(bs, pic->pic_fields.bits.entropy_coding_mode_flag, 1);
> +
> +	/* pic_order_present_flag: 0 */
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	/* num_slice_groups_minus1 */
> +	bitstream_put_ue(bs, 0);
> +
> +	bitstream_put_ue(bs, pic->num_ref_idx_l0_active_minus1);
> +	bitstream_put_ue(bs, pic->num_ref_idx_l1_active_minus1);
> +
> +	bitstream_put_ui(bs, pic->pic_fields.bits.weighted_pred_flag, 1);
> +	bitstream_put_ui(bs, pic->pic_fields.bits.weighted_bipred_idc, 2);
> +
> +	/* pic_init_qp_minus26, pic_init_qs_minus26, chroma_qp_index_offset */
> +	bitstream_put_se(bs, pic->pic_init_qp - 26);
> +	bitstream_put_se(bs, 0);
> +	bitstream_put_se(bs, 0);
> +
> +	bitstream_put_ui(bs, pic->pic_fields.bits.deblocking_filter_control_present_flag, 1);
> +
> +	/* constrained_intra_pred_flag, redundant_pic_cnt_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +	bitstream_put_ui(bs, 0, 1);
> +
> +	bitstream_put_ui(bs, pic->pic_fields.bits.transform_8x8_mode_flag, 1);
> +
> +	/* pic_scaling_matrix_present_flag */
> +	bitstream_put_ui(bs, 0, 1);
> +	bitstream_put_se(bs, pic->second_chroma_qp_index_offset);
> +
> +	rbsp_trailing_bits(bs);
> +}
> +
> +static int
> +build_packed_pic_buffer(struct vaapi_recorder *r,
> +			void **header_buffer)
> +{
> +	struct bitstream bs;
> +
> +	bitstream_start(&bs);
> +	nal_start_code_prefix(&bs);
> +	nal_header(&bs, NAL_REF_IDC_HIGH, NAL_PPS);
> +	pps_rbsp(&bs, &r->encoder.param.pic);
> +	bitstream_end(&bs);
> +
> +	*header_buffer = bs.buffer;
> +	return bs.bit_offset;
> +}
> +
> +static int
> +build_packed_seq_buffer(struct vaapi_recorder *r,
> +			void **header_buffer)
> +{
> +	struct bitstream bs;
> +
> +	bitstream_start(&bs);
> +	nal_start_code_prefix(&bs);
> +	nal_header(&bs, NAL_REF_IDC_HIGH, NAL_SPS);
> +	sps_rbsp(&bs, &r->encoder.param.seq, r->encoder.constraint_set_flag);
> +	bitstream_end(&bs);
> +
> +	*header_buffer = bs.buffer;
> +	return bs.bit_offset;
> +}
> +
> +static int
> +create_packed_header_buffers(struct vaapi_recorder *r, VABufferID *buffers,
> +			     VAEncPackedHeaderType type,
> +			     void *data, int bit_length)
> +{
> +	VAEncPackedHeaderParameterBuffer packed_header;
> +	VAStatus status;
> +
> +	packed_header.type = type;
> +	packed_header.bit_length = bit_length;
> +	packed_header.has_emulation_bytes = 0;
> +
> +	status = vaCreateBuffer(r->va_dpy, r->encoder.ctx,
> +				VAEncPackedHeaderParameterBufferType,
> +				sizeof packed_header, 1, &packed_header,
> +				&buffers[0]);
> +	if (status != VA_STATUS_SUCCESS)
> +		return 0;
> +
> +	status = vaCreateBuffer(r->va_dpy, r->encoder.ctx,
> +				VAEncPackedHeaderDataBufferType,
> +				(bit_length + 7) / 8, 1, data, &buffers[1]);
> +	if (status != VA_STATUS_SUCCESS) {
> +		vaDestroyBuffer(r->va_dpy, buffers[0]);
> +		return 0;
> +	}
> +
> +	return 2;
> +}
> +
> +static int
> +encoder_prepare_headers(struct vaapi_recorder *r, VABufferID *buffers)
> +{
> +	VABufferID *p;
> +
> +	int bit_length;
> +	void *data;
> +
> +	p = buffers;
> +
> +	bit_length = build_packed_seq_buffer(r, &data);
> +	p += create_packed_header_buffers(r, p, VAEncPackedHeaderSequence,
> +					  data, bit_length);
> +	free(data);
> +
> +	bit_length = build_packed_pic_buffer(r, &data);
> +	p += create_packed_header_buffers(r, p, VAEncPackedHeaderPicture,
> +					  data, bit_length);
> +	free(data);
> +
> +	return p - buffers;
> +}
> +
> +static VAStatus
> +encoder_render_picture(struct vaapi_recorder *r, VASurfaceID input,
> +		       VABufferID *buffers, int count)
> +{
> +	VAStatus status;
> +
> +	status = vaBeginPicture(r->va_dpy, r->encoder.ctx, input);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	status = vaRenderPicture(r->va_dpy, r->encoder.ctx, buffers, count);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	status = vaEndPicture(r->va_dpy, r->encoder.ctx);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	return vaSyncSurface(r->va_dpy, input);
> +}
> +
> +static VABufferID
> +encoder_create_output_buffer(struct vaapi_recorder *r)
> +{
> +	VABufferID output_buf;
> +	VAStatus status;
> +
> +	status = vaCreateBuffer(r->va_dpy, r->encoder.ctx,
> +				VAEncCodedBufferType, r->encoder.output_size,
> +				1, NULL, &output_buf);
> +	if (status == VA_STATUS_SUCCESS)
> +		return output_buf;
> +	else
> +		return VA_INVALID_ID;
> +}
> +
> +static int
> +encoder_write_output(struct vaapi_recorder *r, VABufferID output_buf)
> +{
> +	VACodedBufferSegment *segment;
> +	VAStatus status;
> +	int count;
> +
> +	status = vaMapBuffer(r->va_dpy, output_buf, (void **) &segment);
> +	if (status != VA_STATUS_SUCCESS)
> +		return -1;
> +
> +	if (segment->status & VA_CODED_BUF_STATUS_SLICE_OVERFLOW_MASK) {
> +		r->encoder.output_size *= 2;
> +		vaUnmapBuffer(r->va_dpy, output_buf);
> +		return -1;
> +	}
> +
> +	count = write(r->output_fd, segment->buf, segment->size);
> +
> +	vaUnmapBuffer(r->va_dpy, output_buf);
> +
> +	return count;
> +}
> +
> +static void
> +encoder_encode(struct vaapi_recorder *r, VASurfaceID input)
> +{
> +	VABufferID output_buf = VA_INVALID_ID;
> +
> +	VABufferID buffers[8];
> +	int count = 0;
> +
> +	int slice_type;
> +	int ret, i;
> +
> +	if ((r->frame_count % r->encoder.intra_period) == 0)
> +		slice_type = SLICE_TYPE_I;
> +	else
> +		slice_type = SLICE_TYPE_P;
> +
> +	buffers[count++] = encoder_update_seq_parameters(r);
> +	buffers[count++] = encoder_update_misc_hdr_parameter(r);
> +	buffers[count++] = encoder_update_slice_parameter(r, slice_type);
> +
> +	for (i = 0; i < count; i++)
> +		if (buffers[i] == VA_INVALID_ID)
> +			goto bail;
> +
> +	if (r->frame_count == 0)
> +		count += encoder_prepare_headers(r, buffers + count);
> +
> +	do {
> +		output_buf = encoder_create_output_buffer(r);
> +		if (output_buf == VA_INVALID_ID)
> +			goto bail;
> +
> +		buffers[count++] =
> +			encoder_update_pic_parameters(r, output_buf);
> +		if (buffers[count - 1] == VA_INVALID_ID)
> +			goto bail;
> +
> +		encoder_render_picture(r, input, buffers, count);
> +		ret = encoder_write_output(r, output_buf);
> +
> +		vaDestroyBuffer(r->va_dpy, output_buf);
> +		output_buf = VA_INVALID_ID;
> +
> +		vaDestroyBuffer(r->va_dpy, buffers[--count]);
> +	} while (ret < 0);
> +
> +	for (i = 0; i < count; i++)
> +		vaDestroyBuffer(r->va_dpy, buffers[i]);
> +
> +	r->frame_count++;
> +	return;
> +
> +bail:
> +	for (i = 0; i < count; i++)
> +		vaDestroyBuffer(r->va_dpy, buffers[i]);
> +	if (output_buf != VA_INVALID_ID)
> +		vaDestroyBuffer(r->va_dpy, output_buf);
> +}
> +
> +
> +static int
> +setup_vpp(struct vaapi_recorder *r)
> +{
> +	VAStatus status;
> +
> +	status = vaCreateConfig(r->va_dpy, VAProfileNone,
> +				VAEntrypointVideoProc, NULL, 0,
> +				&r->vpp.cfg);
> +	if (status != VA_STATUS_SUCCESS) {
> +		weston_log("vaapi: failed to create VPP config\n");
> +		return -1;
> +	}
> +
> +	status = vaCreateContext(r->va_dpy, r->vpp.cfg, r->width, r->height,
> +				 0, NULL, 0, &r->vpp.ctx);
> +	if (status != VA_STATUS_SUCCESS) {
> +		weston_log("vaapi: failed to create VPP context\n");
> +		goto err_cfg;
> +	}
> +
> +	status = vaCreateBuffer(r->va_dpy, r->vpp.ctx,
> +				VAProcPipelineParameterBufferType,
> +				sizeof(VAProcPipelineParameterBuffer),
> +				1, NULL, &r->vpp.pipeline_buf);
> +	if (status != VA_STATUS_SUCCESS) {
> +		weston_log("vaapi: failed to create VPP pipeline buffer\n");
> +		goto err_ctx;
> +	}
> +
> +	status = vaCreateSurfaces(r->va_dpy, VA_RT_FORMAT_YUV420,
> +				  r->width, r->height, &r->vpp.output, 1,
> +				  NULL, 0);
> +	if (status != VA_STATUS_SUCCESS) {
> +		weston_log("vaapi: failed to create YUV surface\n");
> +		goto err_buf;
> +	}
> +
> +	return 0;
> +
> +err_buf:
> +	vaDestroyBuffer(r->va_dpy, r->vpp.pipeline_buf);
> +err_ctx:
> +	vaDestroyContext(r->va_dpy, r->vpp.ctx);
> +err_cfg:
> +	vaDestroyConfig(r->va_dpy, r->vpp.cfg);
> +
> +	return -1;
> +}
> +
> +static void
> +vpp_destroy(struct vaapi_recorder *r)
> +{
> +	vaDestroySurfaces(r->va_dpy, &r->vpp.output, 1);
> +	vaDestroyBuffer(r->va_dpy, r->vpp.pipeline_buf);
> +	vaDestroyContext(r->va_dpy, r->vpp.ctx);
> +	vaDestroyConfig(r->va_dpy, r->vpp.cfg);
> +}
> +
> +struct vaapi_recorder *
> +vaapi_recorder_create(int drm_fd, int width, int height, const char *filename)
> +{
> +	struct vaapi_recorder *r;
> +	VAStatus status;
> +	int major, minor;
> +	int flags;
> +
> +	r = calloc(1, sizeof *r);
> +	if (!r)
> +		return NULL;
> +
> +	r->width = width;
> +	r->height = height;
> +
> +	flags = O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC;
> +	r->output_fd = open(filename, flags, 0644);
> +
> +	if (r->output_fd < 0)
> +		goto err_free;
> +
> +	r->va_dpy = vaGetDisplayDRM(drm_fd);
> +	if (!r->va_dpy) {
> +		weston_log("failed to create VA display\n");
> +		goto err_fd;
> +	}
> +
> +	status = vaInitialize(r->va_dpy, &major, &minor);
> +	if (status != VA_STATUS_SUCCESS) {
> +		weston_log("vaapi: failed to initialize display\n");
> +		goto err_fd;
> +	}
> +
> +	if (setup_vpp(r) < 0) {
> +		weston_log("vaapi: failed to initialize VPP pipeline\n");
> +		goto err_va_dpy;
> +	}
> +
> +	if (setup_encoder(r) < 0) {
> +		goto err_vpp;
> +	}
> +
> +	return r;
> +
> +err_vpp:
> +	vpp_destroy(r);
> +err_va_dpy:
> +	vaTerminate(r->va_dpy);
> +err_fd:
> +	close(r->output_fd);
> +err_free:
> +	free(r);
> +
> +	return NULL;
> +}
> +
> +void
> +vaapi_recorder_destroy(struct vaapi_recorder *r)
> +{
> +	encoder_destroy(r);
> +	vpp_destroy(r);
> +
> +	vaTerminate(r->va_dpy);
> +
> +	close(r->output_fd);
> +
> +	free(r);
> +}
> +
> +static VAStatus
> +create_surface_from_fd(struct vaapi_recorder *r, int prime_fd,
> +		       int stride, VASurfaceID *surface)
> +{
> +	VASurfaceAttrib va_attribs[2];
> +	VASurfaceAttribExternalBuffers va_attrib_extbuf;
> +	VAStatus status;
> +
> +	unsigned long buffer_fd = prime_fd;
> +
> +	va_attrib_extbuf.pixel_format = VA_FOURCC_BGRX;
> +	va_attrib_extbuf.width = r->width;
> +	va_attrib_extbuf.height = r->height;
> +	va_attrib_extbuf.data_size = r->height * stride;
> +	va_attrib_extbuf.num_planes = 1;
> +	va_attrib_extbuf.pitches[0] = stride;
> +	va_attrib_extbuf.offsets[0] = 0;
> +	va_attrib_extbuf.buffers = &buffer_fd;
> +	va_attrib_extbuf.num_buffers = 1;
> +	va_attrib_extbuf.flags = 0;
> +	va_attrib_extbuf.private_data = NULL;
> +
> +	va_attribs[0].type = VASurfaceAttribMemoryType;
> +	va_attribs[0].flags = VA_SURFACE_ATTRIB_SETTABLE;
> +	va_attribs[0].value.type = VAGenericValueTypeInteger;
> +	va_attribs[0].value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;
> +
> +	va_attribs[1].type = VASurfaceAttribExternalBufferDescriptor;
> +	va_attribs[1].flags = VA_SURFACE_ATTRIB_SETTABLE;
> +	va_attribs[1].value.type = VAGenericValueTypePointer;
> +	va_attribs[1].value.value.p = &va_attrib_extbuf;
> +
> +	status = vaCreateSurfaces(r->va_dpy, VA_RT_FORMAT_RGB32,
> +				  r->width, r->height, surface, 1,
> +				  va_attribs, 2);
> +
> +	return status;
> +}
> +
> +static VAStatus
> +convert_rgb_to_yuv(struct vaapi_recorder *r, VASurfaceID rgb_surface)
> +{
> +	VAProcPipelineParameterBuffer *pipeline_param;
> +	VAStatus status;
> +
> +	status = vaMapBuffer(r->va_dpy, r->vpp.pipeline_buf,
> +			     (void **) &pipeline_param);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	memset(pipeline_param, 0, sizeof *pipeline_param);
> +
> +	pipeline_param->surface = rgb_surface;
> +	pipeline_param->surface_color_standard  = VAProcColorStandardNone;
> +
> +	pipeline_param->output_background_color = 0xff000000;
> +	pipeline_param->output_color_standard   = VAProcColorStandardNone;
> +
> +	status = vaUnmapBuffer(r->va_dpy, r->vpp.pipeline_buf);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	status = vaBeginPicture(r->va_dpy, r->vpp.ctx, r->vpp.output);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	status = vaRenderPicture(r->va_dpy, r->vpp.ctx,
> +				 &r->vpp.pipeline_buf, 1);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	status = vaEndPicture(r->va_dpy, r->vpp.ctx);
> +	if (status != VA_STATUS_SUCCESS)
> +		return status;
> +
> +	return status;
> +}
> +
> +void
> +vaapi_recorder_frame(struct vaapi_recorder *r, int prime_fd,
> +		     int stride)
> +{
> +	VASurfaceID rgb_surface;
> +	VAStatus status;
> +
> +	status = create_surface_from_fd(r, prime_fd, stride, &rgb_surface);
> +	if (status != VA_STATUS_SUCCESS) {
> +		weston_log("[libva recorder] "
> +			   "failed to create surface from bo\n");
> +		return;
> +	}
> +
> +	status = convert_rgb_to_yuv(r, rgb_surface);
> +	if (status != VA_STATUS_SUCCESS) {
> +		weston_log("[libva recorder] "
> +			   "color space conversion failed\n");
> +		return;
> +	}
> +
> +	encoder_encode(r, r->vpp.output);
> +
> +	vaDestroySurfaces(r->va_dpy, &rgb_surface, 1);
> +}
> +
> +
> diff --git a/src/vaapi-recorder.h b/src/vaapi-recorder.h
> new file mode 100644
> index 0000000..664b1f9
> --- /dev/null
> +++ b/src/vaapi-recorder.h
> @@ -0,0 +1,35 @@
> +/*
> + * Copyright © 2013 Intel Corporation
> + *
> + * Permission to use, copy, modify, distribute, and sell this software and
> + * its documentation for any purpose is hereby granted without fee, provided
> + * that the above copyright notice appear in all copies and that both that
> + * copyright notice and this permission notice appear in supporting
> + * documentation, and that the name of the copyright holders not be used in
> + * advertising or publicity pertaining to distribution of the software
> + * without specific, written prior permission.  The copyright holders make
> + * no representations about the suitability of this software for any
> + * purpose.  It is provided "as is" without express or implied warranty.
> + *
> + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
> + * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
> + * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
> + * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
> + * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
> + * CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
> + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
> + */
> +
> +#ifndef _VAAPI_RECORDER_H_
> +#define _VAAPI_RECORDER_H_
> +
> +struct vaapi_recorder;
> +
> +struct vaapi_recorder *
> +vaapi_recorder_create(int drm_fd, int width, int height, const char *filename);
> +void
> +vaapi_recorder_destroy(struct vaapi_recorder *r);
> +void
> +vaapi_recorder_frame(struct vaapi_recorder *r, int fd, int stride);
> +
> +#endif /* _VAAPI_RECORDER_H_ */
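
(For completeness, a hypothetical standalone caller of this interface,
with made-up variable names, would look roughly like:

   int drm_fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
   /* authenticate against the DRM master first, as create_recorder()
    * in compositor-drm.c does with drmGetMagic()/drmAuthMagic() */
   struct vaapi_recorder *r =
           vaapi_recorder_create(drm_fd, width, height, "capture.h264");
   /* per frame: */
   vaapi_recorder_frame(r, prime_fd, stride);
   /* when done: */
   vaapi_recorder_destroy(r);
   close(drm_fd); /* the recorder does not close it, cf. the FIXME
                   * in recorder_binding() */

nothing here is new relative to the patch, it just spells out the fd
ownership.)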
> -- 
> 1.7.9.5
> 
> _______________________________________________
> wayland-devel mailing list
> wayland-devel at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/wayland-devel

