[Libva] [PATCH] JPEG Encode: Add unit test for the JPEG encode feature.

Sirisha Muppavarapu sirisha.muppavarapu at intel.com
Mon Nov 10 23:08:44 PST 2014


---
 test/encode/Makefile.am     |   9 +-
 test/encode/jpegenc.c       | 996 ++++++++++++++++++++++++++++++++++++++++++++
 test/encode/jpegenc_utils.h | 353 ++++++++++++++++
 3 files changed, 1357 insertions(+), 1 deletion(-)
 create mode 100644 test/encode/jpegenc.c
 create mode 100644 test/encode/jpegenc_utils.h

diff --git a/test/encode/Makefile.am b/test/encode/Makefile.am
index 5ddabd1..45c39a5 100644
--- a/test/encode/Makefile.am
+++ b/test/encode/Makefile.am
@@ -20,7 +20,7 @@
 # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
-bin_PROGRAMS = avcenc mpeg2vaenc h264encode
+bin_PROGRAMS = avcenc mpeg2vaenc h264encode jpegenc
 
 INCLUDES = \
        -Wall                           \
@@ -49,6 +49,13 @@ mpeg2vaenc_LDADD	= \
 	$(top_builddir)/test/common/libva-display.la \
 	-lpthread
 
+jpegenc_SOURCES		= jpegenc.c
+jpegenc_CFLAGS		= -I$(top_srcdir)/test/common -g
+jpegenc_LDADD		= \
+	$(top_builddir)/va/libva.la \
+	$(top_builddir)/test/common/libva-display.la \
+	-lpthread
+
 valgrind:	$(bin_PROGRAMS)
 	for a in $(bin_PROGRAMS); do \
 		valgrind --leak-check=full --show-reachable=yes .libs/$$a; \
diff --git a/test/encode/jpegenc.c b/test/encode/jpegenc.c
new file mode 100644
index 0000000..dababdc
--- /dev/null
+++ b/test/encode/jpegenc.c
@@ -0,0 +1,996 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Simple JPEG encoder based on libVA.
+ *
+ * Usage:
+ * ./jpegenc <width> <height> <input file> <output file> <input filetype 0(I420)/1(NV12)/2(UYVY)/3(YUY2)/4(Y8)/5(RGB)> <quality>
+ * Currently only the I420 and NV12 input file formats are supported.
+ */
+
+#include "sysdeps.h"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <unistd.h>
+
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <time.h>
+
+#include <pthread.h>
+
+#include <va/va.h>
+#include <va/va_enc_jpeg.h>
+#include "va_display.h"
+#include "jpegenc_utils.h"
+
+#ifndef VA_FOURCC_I420
+#define VA_FOURCC_I420          0x30323449
+#endif
+
+#define CHECK_VASTATUS(va_status,func)                                  \
+    if (va_status != VA_STATUS_SUCCESS) {                                   \
+        fprintf(stderr,"%s:%s (%d) failed,exit\n", __func__, func, __LINE__); \
+        exit(1);                                                            \
+    }
+
+
+void show_help()
+{
+    printf("Usage: ./jpegenc <width> <height> <input file> <output file> <fourcc value 0(I420)/1(NV12)/2(UYVY)/3(YUY2)/4(Y8)/5(RGB)> <quality>\n");
+    printf("Currently only the I420 and NV12 input file formats are supported.\n");
+    printf("Example: ./jpegenc 1024 768 input_file.yuv output.jpeg 0 50\n\n");
+    return;
+}
+
+
+void jpegenc_pic_param_init(VAEncPictureParameterBufferJPEG *pic_param,int width,int height,int quality, YUVComponentSpecs yuvComp)
+{
+    assert(pic_param);
+    
+    pic_param->picture_width = width;
+    pic_param->picture_height = height;
+    pic_param->quality = quality;
+    
+    pic_param->pic_flags.bits.profile = 0;      //Profile = Baseline
+    pic_param->pic_flags.bits.progressive = 0;  //Sequential encoding
+    pic_param->pic_flags.bits.huffman = 1;      //Uses Huffman coding
+    pic_param->pic_flags.bits.interleaved = 0;  //Non-interleaved scan
+    pic_param->pic_flags.bits.differential = 0; //non-Differential Encoding
+    
+    pic_param->sample_bit_depth = 8; //only 8 bit sample depth is currently supported
+    pic_param->num_scan = 1;
+    pic_param->num_components = yuvComp.num_components; //Supporting only up to 3 components maximum
+    //set component_id Ci and Tqi
+    if(yuvComp.fourcc_val == VA_FOURCC_Y800) {
+        pic_param->component_id[0] = 0;
+        pic_param->quantiser_table_selector[0] = 0;
+    } else {
+        pic_param->component_id[0] = pic_param->quantiser_table_selector[0] = 0;
+        pic_param->component_id[1] = pic_param->quantiser_table_selector[1] = 1;
+        pic_param->component_id[2] = 2;
+        pic_param->quantiser_table_selector[2] = 1;
+    }
+}
+
+void jpegenc_qmatrix_init(VAQMatrixBufferJPEG *quantization_param, YUVComponentSpecs yuvComp)
+{
+    int i=0;
+    quantization_param->load_lum_quantiser_matrix = 1;
+   
+    //LibVA expects the QM in zigzag order 
+    for(i=0; i<NUM_QUANT_ELEMENTS; i++) {
+        quantization_param->lum_quantiser_matrix[i] = jpeg_luma_quant[jpeg_zigzag[i]];
+    }
+    
+    
+    if(yuvComp.fourcc_val == VA_FOURCC_Y800) {
+        quantization_param->load_chroma_quantiser_matrix = 0;
+    } else {
+        quantization_param->load_chroma_quantiser_matrix = 1;
+        for(i=0; i<NUM_QUANT_ELEMENTS; i++) {
+            quantization_param->chroma_quantiser_matrix[i] = jpeg_chroma_quant[jpeg_zigzag[i]];
+        }
+    }
+    
+}
+
+void jpegenc_hufftable_init(VAHuffmanTableBufferJPEGBaseline *hufftable_param, YUVComponentSpecs yuvComp)
+{
+    
+    hufftable_param->load_huffman_table[0] = 1; //Load Luma Hufftable
+    if(yuvComp.fourcc_val == VA_FOURCC_Y800) {
+        hufftable_param->load_huffman_table[1] = 0; //Do not load Chroma Hufftable for Y8
+    } else {
+        hufftable_param->load_huffman_table[1] = 1; //Load Chroma Hufftable for other formats
+    }
+    
+   //Load Luma hufftable values
+   //Load DC codes
+   memcpy(hufftable_param->huffman_table[0].num_dc_codes, jpeg_hufftable_luma_dc+1, 16);
+   //Load DC Values
+   memcpy(hufftable_param->huffman_table[0].dc_values, jpeg_hufftable_luma_dc+17, 12);
+   //Load AC codes
+   memcpy(hufftable_param->huffman_table[0].num_ac_codes, jpeg_hufftable_luma_ac+1, 16);
+   //Load AC Values
+   memcpy(hufftable_param->huffman_table[0].ac_values, jpeg_hufftable_luma_ac+17, 162);
+   memset(hufftable_param->huffman_table[0].pad, 0, 2);
+      
+   
+   //Load Chroma hufftable values if needed
+   if(yuvComp.fourcc_val != VA_FOURCC_Y800) {
+       //Load DC codes
+       memcpy(hufftable_param->huffman_table[1].num_dc_codes, jpeg_hufftable_chroma_dc+1, 16);
+       //Load DC Values
+       memcpy(hufftable_param->huffman_table[1].dc_values, jpeg_hufftable_chroma_dc+17, 12);
+       //Load AC codes
+       memcpy(hufftable_param->huffman_table[1].num_ac_codes, jpeg_hufftable_chroma_ac+1, 16);
+       //Load AC Values
+       memcpy(hufftable_param->huffman_table[1].ac_values, jpeg_hufftable_chroma_ac+17, 162);
+       memset(hufftable_param->huffman_table[1].pad, 0, 2);      
+       
+   }
+    
+}
+
+void jpegenc_slice_param_init(VAEncSliceParameterBufferJPEG *slice_param, YUVComponentSpecs yuvComp)
+{
+    slice_param->restart_interval = 0;
+    
+    slice_param->num_components = yuvComp.num_components;
+    
+    slice_param->components[0].component_selector = 1;
+    slice_param->components[0].dc_table_selector = 0;
+    slice_param->components[0].ac_table_selector = 0;        
+
+    if(yuvComp.num_components > 1) { 
+        slice_param->components[1].component_selector = 2;
+        slice_param->components[1].dc_table_selector = 1;
+        slice_param->components[1].ac_table_selector = 1;        
+
+        slice_param->components[2].component_selector = 3;
+        slice_param->components[2].dc_table_selector = 1;
+        slice_param->components[2].ac_table_selector = 1;        
+    }
+}
+
+
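+//Fill a DQT section with the luma (type 0) or chroma (type 1) quantization
+//table, reordered into zigzag scan order.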
+void populate_quantdata(JPEGQuantSection *quantVal, int type)
+{
+    uint8_t zigzag_qm[NUM_QUANT_ELEMENTS];
+    int i;
+
+    quantVal->DQT = DQT;
+    quantVal->Pq = 0;
+    quantVal->Tq = type;
+    if(type == 0) {
+        for(i=0; i<NUM_QUANT_ELEMENTS; i++) {
+            zigzag_qm[i] = jpeg_luma_quant[jpeg_zigzag[i]];
+        }
+
+        memcpy(quantVal->Qk, zigzag_qm, NUM_QUANT_ELEMENTS);
+    } else {
+        for(i=0; i<NUM_QUANT_ELEMENTS; i++) {
+            zigzag_qm[i] = jpeg_chroma_quant[jpeg_zigzag[i]];
+        }
+        memcpy(quantVal->Qk, zigzag_qm, NUM_QUANT_ELEMENTS);
+    }
+    quantVal->Lq = 3 + NUM_QUANT_ELEMENTS;
+}
+
+void populate_frame_header(JPEGFrameHeader *frameHdr, YUVComponentSpecs yuvComp, int picture_width, int picture_height)
+{
+    int i=0;
+    
+    frameHdr->SOF = SOF0;
+    frameHdr->Lf = 8 + (3 * yuvComp.num_components); //Size of FrameHeader in bytes without the Marker SOF
+    frameHdr->P = 8;
+    frameHdr->Y = picture_height;
+    frameHdr->X = picture_width;
+    frameHdr->Nf = yuvComp.num_components;
+    
+    for(i=0; i<yuvComp.num_components; i++) {
+        frameHdr->JPEGComponent[i].Ci = i+1;
+        
+        if(i == 0) {
+            frameHdr->JPEGComponent[i].Hi = yuvComp.y_h_subsample;
+            frameHdr->JPEGComponent[i].Vi = yuvComp.y_v_subsample;
+            frameHdr->JPEGComponent[i].Tqi = 0;
+
+        } else {
+            //The U/V sampling factors are 1 for all supported formats. Y8 never
+            //reaches this code since it has only a single component.
+            frameHdr->JPEGComponent[i].Hi = 1;  
+            frameHdr->JPEGComponent[i].Vi = 1;
+            frameHdr->JPEGComponent[i].Tqi = 1;
+        }
+    }
+}
+
+void populate_huff_section_header(JPEGHuffSection *huffSectionHdr, int th, int tc)
+{
+    int i=0, totalCodeWords=0;
+    
+    huffSectionHdr->DHT = DHT;
+    huffSectionHdr->Tc = tc;
+    huffSectionHdr->Th = th;
+    
+    if(th == 0) { //If Luma
+
+        //If AC
+        if(tc == 1) {
+            memcpy(huffSectionHdr->Li, jpeg_hufftable_luma_ac+1, NUM_AC_RUN_SIZE_BITS);
+            memcpy(huffSectionHdr->Vij, jpeg_hufftable_luma_ac+17, NUM_AC_CODE_WORDS_HUFFVAL);
+        }
+               
+        //If DC        
+        if(tc == 0) {
+            memcpy(huffSectionHdr->Li, jpeg_hufftable_luma_dc+1, NUM_DC_RUN_SIZE_BITS);
+            memcpy(huffSectionHdr->Vij, jpeg_hufftable_luma_dc+17, NUM_DC_CODE_WORDS_HUFFVAL);
+        }
+        
+    } else { //If Chroma
+        //If AC
+        if(tc == 1) {
+            memcpy(huffSectionHdr->Li, jpeg_hufftable_chroma_ac+1, NUM_AC_RUN_SIZE_BITS);
+            memcpy(huffSectionHdr->Vij, jpeg_hufftable_chroma_ac+17, NUM_AC_CODE_WORDS_HUFFVAL);
+        }
+
+        //If DC
+        if(tc == 0) {
+            memcpy(huffSectionHdr->Li, jpeg_hufftable_chroma_dc+1, NUM_DC_RUN_SIZE_BITS);
+            memcpy(huffSectionHdr->Vij, jpeg_hufftable_chroma_dc+17, NUM_DC_CODE_WORDS_HUFFVAL);
+        }
+    }
+
+    //Lh = 2 (length bytes) + 1 (Tc/Th byte) + 16 (Li bytes) + total number of code words
+    for(i=0; i<NUM_AC_RUN_SIZE_BITS; i++) {
+        totalCodeWords += huffSectionHdr->Li[i];
+    }
+
+    huffSectionHdr->Lh = 3 + 16 + totalCodeWords;
+}
+
+void populate_scan_header(JPEGScanHeader *scanHdr, YUVComponentSpecs yuvComp)
+{
+    
+    scanHdr->SOS = SOS;
+    scanHdr->Ns = yuvComp.num_components;
+    
+    //Y Component
+    scanHdr->ScanComponent[0].Csj = 1;
+    scanHdr->ScanComponent[0].Tdj = 0;
+    scanHdr->ScanComponent[0].Taj = 0;
+    
+    if(yuvComp.num_components > 1) {
+        //U Component
+        scanHdr->ScanComponent[1].Csj = 2;
+        scanHdr->ScanComponent[1].Tdj = 1;
+        scanHdr->ScanComponent[1].Taj = 1;
+        
+        //V Component
+        scanHdr->ScanComponent[2].Csj = 3;
+        scanHdr->ScanComponent[2].Tdj = 1;
+        scanHdr->ScanComponent[2].Taj = 1;
+    }
+    
+    scanHdr->Ss = 0;  //0 for Baseline
+    scanHdr->Se = 63; //63 for Baseline
+    scanHdr->Ah = 0;  //0 for Baseline
+    scanHdr->Al = 0;  //0 for Baseline
+    
+    scanHdr->Ls = 3 + (yuvComp.num_components * 2) + 3;
+    
+}
+
+
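+//Build the packed JPEG header (SOI, APP0, DQT, SOF0, DHT, optional DRI and SOS)
+//into a newly allocated buffer and return its length in bits.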
+int build_packed_jpeg_header_buffer(unsigned char **header_buffer, YUVComponentSpecs yuvComp, int picture_width, int picture_height, uint16_t restart_interval)
+{
+    bitstream bs;
+    int i=0, j=0;
+    
+    bitstream_start(&bs);
+    
+    //Add SOI
+    bitstream_put_ui(&bs, SOI, 16);
+    
+    //Add AppData
+    bitstream_put_ui(&bs, APP0, 16);  //APP0 marker
+    bitstream_put_ui(&bs, 16, 16);    //Length excluding the marker
+    bitstream_put_ui(&bs, 0x4A, 8);   //J
+    bitstream_put_ui(&bs, 0x46, 8);   //F
+    bitstream_put_ui(&bs, 0x49, 8);   //I
+    bitstream_put_ui(&bs, 0x46, 8);   //F
+    bitstream_put_ui(&bs, 0x00, 8);   //0
+    bitstream_put_ui(&bs, 1, 8);      //Major Version
+    bitstream_put_ui(&bs, 1, 8);      //Minor Version
+    bitstream_put_ui(&bs, 1, 8);      //Density units 0:no units, 1:pixels per inch, 2: pixels per cm
+    bitstream_put_ui(&bs, 72, 16);    //X density
+    bitstream_put_ui(&bs, 72, 16);    //Y density
+    bitstream_put_ui(&bs, 0, 8);      //Thumbnail width
+    bitstream_put_ui(&bs, 0, 8);      //Thumbnail height
+    
+    //Add QTable - Y
+    JPEGQuantSection quantLuma;
+    populate_quantdata(&quantLuma, 0);
+
+    bitstream_put_ui(&bs, quantLuma.DQT, 16);
+    bitstream_put_ui(&bs, quantLuma.Lq, 16);
+    bitstream_put_ui(&bs, quantLuma.Pq, 4);
+    bitstream_put_ui(&bs, quantLuma.Tq, 4);
+    for(i=0; i<NUM_QUANT_ELEMENTS; i++) {
+        bitstream_put_ui(&bs, quantLuma.Qk[i], 8);
+    }
+
+    //Add QTable - U/V
+    if(yuvComp.fourcc_val != VA_FOURCC_Y800) {
+        JPEGQuantSection quantChroma;
+        populate_quantdata(&quantChroma, 1);
+        
+        bitstream_put_ui(&bs, quantChroma.DQT, 16);
+        bitstream_put_ui(&bs, quantChroma.Lq, 16);
+        bitstream_put_ui(&bs, quantChroma.Pq, 4);
+        bitstream_put_ui(&bs, quantChroma.Tq, 4);
+        for(i=0; i<NUM_QUANT_ELEMENTS; i++) {
+            bitstream_put_ui(&bs, quantChroma.Qk[i], 8);
+        }
+    }
+    
+    //Add FrameHeader
+    JPEGFrameHeader frameHdr;
+    memset(&frameHdr,0,sizeof(JPEGFrameHeader));
+    populate_frame_header(&frameHdr, yuvComp, picture_width, picture_height);
+
+    bitstream_put_ui(&bs, frameHdr.SOF, 16);
+    bitstream_put_ui(&bs, frameHdr.Lf, 16);
+    bitstream_put_ui(&bs, frameHdr.P, 8);
+    bitstream_put_ui(&bs, frameHdr.Y, 16);
+    bitstream_put_ui(&bs, frameHdr.X, 16);
+    bitstream_put_ui(&bs, frameHdr.Nf, 8);
+    for(i=0; i<frameHdr.Nf;i++) {
+        bitstream_put_ui(&bs, frameHdr.JPEGComponent[i].Ci, 8);
+        bitstream_put_ui(&bs, frameHdr.JPEGComponent[i].Hi, 4);
+        bitstream_put_ui(&bs, frameHdr.JPEGComponent[i].Vi, 4);
+        bitstream_put_ui(&bs, frameHdr.JPEGComponent[i].Tqi, 8);
+    }
+
+    //Add HuffTable AC and DC for Y,U/V components
+    JPEGHuffSection acHuffSectionHdr, dcHuffSectionHdr;
+        
+    for(i=0; (i<yuvComp.num_components && (i<=1)); i++) {
+        //Add DC component (Tc = 0)
+        populate_huff_section_header(&dcHuffSectionHdr, i, 0); 
+        
+        bitstream_put_ui(&bs, dcHuffSectionHdr.DHT, 16);
+        bitstream_put_ui(&bs, dcHuffSectionHdr.Lh, 16);
+        bitstream_put_ui(&bs, dcHuffSectionHdr.Tc, 4);
+        bitstream_put_ui(&bs, dcHuffSectionHdr.Th, 4);
+        for(j=0; j<NUM_DC_RUN_SIZE_BITS; j++) {
+            bitstream_put_ui(&bs, dcHuffSectionHdr.Li[j], 8);
+        }
+        
+        for(j=0; j<NUM_DC_CODE_WORDS_HUFFVAL; j++) {
+            bitstream_put_ui(&bs, dcHuffSectionHdr.Vij[j], 8);
+        }
+
+        //Add AC component (Tc = 1)
+        populate_huff_section_header(&acHuffSectionHdr, i, 1);
+        
+        bitstream_put_ui(&bs, acHuffSectionHdr.DHT, 16);
+        bitstream_put_ui(&bs, acHuffSectionHdr.Lh, 16);
+        bitstream_put_ui(&bs, acHuffSectionHdr.Tc, 4);
+        bitstream_put_ui(&bs, acHuffSectionHdr.Th, 4);
+        for(j=0; j<NUM_AC_RUN_SIZE_BITS; j++) {
+            bitstream_put_ui(&bs, acHuffSectionHdr.Li[j], 8);
+        }
+
+        for(j=0; j<NUM_AC_CODE_WORDS_HUFFVAL; j++) {
+            bitstream_put_ui(&bs, acHuffSectionHdr.Vij[j], 8);
+        }
+
+    }
+    
+    //Add Restart Interval if restart_interval is not 0
+    if(restart_interval != 0) {
+        JPEGRestartSection restartHdr;
+        restartHdr.DRI = DRI;
+        restartHdr.Lr = 4;
+        restartHdr.Ri = restart_interval;
+
+        bitstream_put_ui(&bs, restartHdr.DRI, 16); 
+        bitstream_put_ui(&bs, restartHdr.Lr, 16);
+        bitstream_put_ui(&bs, restartHdr.Ri, 16); 
+    }
+    
+    //Add ScanHeader
+    JPEGScanHeader scanHdr;
+    populate_scan_header(&scanHdr, yuvComp);
+ 
+    bitstream_put_ui(&bs, scanHdr.SOS, 16);
+    bitstream_put_ui(&bs, scanHdr.Ls, 16);
+    bitstream_put_ui(&bs, scanHdr.Ns, 8);
+    
+    for(i=0; i<scanHdr.Ns; i++) {
+        bitstream_put_ui(&bs, scanHdr.ScanComponent[i].Csj, 8);
+        bitstream_put_ui(&bs, scanHdr.ScanComponent[i].Tdj, 4);
+        bitstream_put_ui(&bs, scanHdr.ScanComponent[i].Taj, 4);
+    }
+
+    bitstream_put_ui(&bs, scanHdr.Ss, 8);
+    bitstream_put_ui(&bs, scanHdr.Se, 8);
+    bitstream_put_ui(&bs, scanHdr.Ah, 4);
+    bitstream_put_ui(&bs, scanHdr.Al, 4);
+
+    bitstream_end(&bs);
+    *header_buffer = (unsigned char *)bs.buffer;
+    
+    return bs.bit_offset;
+}
+
+//Upload the yuv image from the file to the VASurface
+void upload_yuv_to_surface(VADisplay va_dpy, FILE *yuv_fp, VASurfaceID surface_id, YUVComponentSpecs yuvComp, int picture_width, int picture_height, int frame_size)
+{
+
+    VAImage surface_image;
+    VAStatus va_status;
+    void *surface_p = NULL;
+    unsigned char *newImageBuffer = malloc(frame_size); //Heap allocation: a frame can be several MB
+    unsigned char *y_src, *u_src, *v_src;
+    unsigned char *y_dst, *u_dst, *v_dst;
+    int y_size = picture_width * picture_height;
+    int u_size = (picture_width >> 1) * (picture_height >> 1);
+    int row, col;
+    size_t n_items;
+    
+    assert(newImageBuffer);
+    memset(newImageBuffer, 0, frame_size);
+    do {
+        n_items = fread(newImageBuffer, frame_size, 1, yuv_fp);
+    } while (n_items != 1);
+
+    va_status = vaDeriveImage(va_dpy, surface_id, &surface_image);
+    CHECK_VASTATUS(va_status,"vaDeriveImage");
+
+    va_status = vaMapBuffer(va_dpy, surface_image.buf, &surface_p);
+    assert(VA_STATUS_SUCCESS == va_status);
+
+    y_src = newImageBuffer;
+    u_src = newImageBuffer + y_size; /* UV offset for NV12 */
+    v_src = newImageBuffer + y_size + u_size;
+
+    y_dst = surface_p + surface_image.offsets[0];
+    u_dst = surface_p + surface_image.offsets[1]; /* UV offset for NV12 */
+    v_dst = surface_p + surface_image.offsets[2];
+
+    /* Y plane */
+    for (row = 0; row < surface_image.height; row++) {
+        memcpy(y_dst, y_src, surface_image.width);
+        y_dst += surface_image.pitches[0];
+        y_src += picture_width;
+    }
+
+    if(yuvComp.num_components > 1 ) {
+        
+        switch(yuvComp.fourcc_val) {
+            
+            case VA_FOURCC_NV12: {
+                for (row = 0; row < surface_image.height/2; row++) {
+                    memcpy(u_dst, u_src, surface_image.width);
+                    u_dst += surface_image.pitches[1];
+                    u_src += (picture_width);
+                }
+                break;
+            }
+
+            case VA_FOURCC_I420: {
+                for (row = 0; row < surface_image.height / 2; row++) {
+                    for (col = 0; col < surface_image.width / 2; col++) {
+                        u_dst[col * 2] = u_src[col];
+                        u_dst[col * 2 + 1] = v_src[col];
+                    }
+
+                    u_dst += surface_image.pitches[1];
+                    u_src += (picture_width / 2);
+                    v_src += (picture_width / 2);
+                }
+
+                break;
+            }
+    
+            //TODO: The code for the formats below needs to be fixed.
+            //This will come as an enhancement to this feature.
+            case VA_FOURCC_UYVY:
+            case VA_FOURCC_YUY2: {
+                const int U = 1;
+                const int V = 2;
+
+                u_dst = surface_p + surface_image.offsets[U];
+                v_dst = surface_p + surface_image.offsets[V];
+
+                for (row = 0; row < surface_image.height / 2; row++) {
+                    memcpy(u_dst, u_src, surface_image.width / 2);
+                    memcpy(v_dst, v_src, surface_image.width / 2);
+                    u_dst += surface_image.pitches[U];
+                    v_dst += surface_image.pitches[V];
+                    u_src += (picture_width / 2);
+                    v_src += (picture_width / 2);
+                }
+             
+                break;
+            }
+            
+            case VA_FOURCC_444P: {
+                const int U = 1;
+                const int V = 2;
+
+                u_dst = surface_p + surface_image.offsets[U];
+                v_dst = surface_p + surface_image.offsets[V];
+
+                for (row = 0; row < surface_image.height; row++) {
+                    memcpy(u_dst, u_src, surface_image.width);
+                    memcpy(v_dst, v_src, surface_image.width);
+                    u_dst += surface_image.pitches[U];
+                    v_dst += surface_image.pitches[V];
+                    u_src += (picture_width);
+                    v_src += (picture_width);
+                }
+                break;
+            }
+            
+            default: {
+                //Unexpected format: only the Y plane was copied (treated like Y8)
+                break;
+            }
+        } //end of switch
+    } //end of if
+    
+    vaUnmapBuffer(va_dpy, surface_image.buf);
+    vaDestroyImage(va_dpy, surface_image.image_id);
+    free(newImageBuffer);
+}
+
+
+
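+//Fill in the YUV component layout (sampling factors, component count),
+//the VA surface format and the surface fourcc for the requested input type.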
+void init_yuv_component(YUVComponentSpecs *yuvComponent, int yuv_type, int *surface_type, VASurfaceAttrib *fourcc)
+{
+    
+    //<input file type: 0(I420)/1(NV12)/2(UYVY)/3(YUY2)/4(Y8)/5(RGB)>
+    switch(yuv_type)
+    {
+        case 0 :   //I420
+        case 1 : { //NV12
+            yuvComponent->va_surface_format = (*surface_type) = VA_RT_FORMAT_YUV420;
+            if(yuv_type == 0) {
+                yuvComponent->fourcc_val = VA_FOURCC_I420;
+                fourcc->value.value.i = VA_FOURCC_NV12;
+            } else {
+                yuvComponent->fourcc_val = fourcc->value.value.i = VA_FOURCC_NV12;
+            } 
+            yuvComponent->num_components = 3;
+            yuvComponent->y_h_subsample = 2;
+            yuvComponent->y_v_subsample = 2;
+            yuvComponent->u_h_subsample = 1;
+            yuvComponent->u_v_subsample = 1;
+            yuvComponent->v_h_subsample = 1;
+            yuvComponent->v_v_subsample = 1;            
+            break;
+        }
+        
+        case 2: { //UYVY 
+            yuvComponent->va_surface_format = (*surface_type) = VA_RT_FORMAT_YUV422;
+            yuvComponent->fourcc_val = fourcc->value.value.i = VA_FOURCC_UYVY;
+            yuvComponent->num_components = 3;
+            yuvComponent->y_h_subsample = 2;
+            yuvComponent->y_v_subsample = 1;
+            yuvComponent->u_h_subsample = 1;
+            yuvComponent->u_v_subsample = 1;
+            yuvComponent->v_h_subsample = 1;
+            yuvComponent->v_v_subsample = 1;
+            break;
+        }
+        
+        case 3: { //YUY2
+            yuvComponent->va_surface_format = (*surface_type) = VA_RT_FORMAT_YUV422;
+            yuvComponent->fourcc_val = fourcc->value.value.i = VA_FOURCC_YUY2;
+            yuvComponent->num_components = 3;
+            yuvComponent->y_h_subsample = 2;
+            yuvComponent->y_v_subsample = 1;
+            yuvComponent->u_h_subsample = 1;
+            yuvComponent->u_v_subsample = 1;
+            yuvComponent->v_h_subsample = 1;
+            yuvComponent->v_v_subsample = 1;
+            break;
+        }
+        
+        case 4: { //Y8
+            yuvComponent->va_surface_format = (*surface_type) = VA_RT_FORMAT_YUV400;
+            yuvComponent->fourcc_val = fourcc->value.value.i = VA_FOURCC_Y800;
+            yuvComponent->num_components = 1;
+            yuvComponent->y_h_subsample = 1;
+            yuvComponent->y_v_subsample = 1; //Vi in the frame header must be at least 1
+            yuvComponent->u_h_subsample = 0;
+            yuvComponent->u_v_subsample = 0;
+            yuvComponent->v_h_subsample = 0;
+            yuvComponent->v_v_subsample = 0;
+            break;
+        }
+        
+        case 5: { //RGB or YUV444
+            yuvComponent->va_surface_format = (*surface_type) = VA_RT_FORMAT_YUV444;
+            yuvComponent->fourcc_val = fourcc->value.value.i = VA_FOURCC_444P;
+            yuvComponent->num_components = 3;
+            yuvComponent->y_h_subsample = 1;
+            yuvComponent->y_v_subsample = 1;
+            yuvComponent->u_h_subsample = 1;
+            yuvComponent->u_v_subsample = 1;
+            yuvComponent->v_h_subsample = 1;
+            yuvComponent->v_v_subsample = 1;
+            break;
+        }
+        
+        default: {
+            printf("Unsupported format:\n");
+            show_help();
+            break;
+        }
+        
+    }
+    
+}
+
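+//Set up the VA encode pipeline (config, surface, context and parameter buffers),
+//submit the picture and write the coded JPEG data to jpeg_fp.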
+int encode_input_image(FILE *yuv_fp, FILE *jpeg_fp, int picture_width, int picture_height, int frame_size, int yuv_type, int quality)
+{
+    int num_entrypoints,enc_entrypoint;
+    int major_ver, minor_ver;
+    int surface_type;
+    VAEntrypoint entrypoints[5];
+    VASurfaceAttrib fourcc;
+    VAConfigAttrib attrib[2];
+    VADisplay   va_dpy;
+    VAStatus va_status;
+    VAConfigID config_id;
+    VASurfaceID surface_id;
+    VAContextID context_id;
+    VABufferID pic_param_buf_id;                /* Picture parameter id*/
+    VABufferID slice_param_buf_id;              /* Slice parameter id, only 1 slice per frame in jpeg encode */
+    VABufferID codedbuf_buf_id;                 /* Output buffer id, compressed data */
+    VABufferID packed_raw_header_param_buf_id;  /* Header parameter buffer id */
+    VABufferID packed_raw_header_buf_id;        /* Header buffer id */
+    VABufferID qmatrix_buf_id;                  /* Quantization Matrix id */
+    VABufferID huffmantable_buf_id;             /* Huffman table id*/
+    VAEncPictureParameterBufferJPEG pic_param;  /* Picture parameter buffer */
+    VAEncSliceParameterBufferJPEG slice_param;  /* Slice parameter buffer */
+    VAQMatrixBufferJPEG quantization_param;     /* Quantization Matrix buffer */
+    VAHuffmanTableBufferJPEGBaseline hufftable_param; /* Huffmantable buffer */
+    YUVComponentSpecs yuvComponent;
+    int writeToFile = 1;
+    
+    
+    fourcc.type =VASurfaceAttribPixelFormat;
+    fourcc.flags=VA_SURFACE_ATTRIB_SETTABLE;
+    fourcc.value.type=VAGenericValueTypeInteger;
+    
+    init_yuv_component(&yuvComponent, yuv_type, &surface_type, &fourcc);
+    
+    /* 1. Initialize the va driver */
+    va_dpy = va_open_display();
+    va_status = vaInitialize(va_dpy, &major_ver, &minor_ver);
+    assert(va_status == VA_STATUS_SUCCESS);
+    
+    /* 2. Query for the entrypoints for the JPEGBaseline profile */
+    va_status = vaQueryConfigEntrypoints(va_dpy, VAProfileJPEGBaseline, entrypoints, &num_entrypoints);
+    CHECK_VASTATUS(va_status, "vaQueryConfigEntrypoints");
+    // We need picture level encoding (VAEntrypointEncPicture). Find if it is supported. 
+    for (enc_entrypoint = 0; enc_entrypoint < num_entrypoints; enc_entrypoint++) {
+        if (entrypoints[enc_entrypoint] == VAEntrypointEncPicture)
+            break;
+    }
+    if (enc_entrypoint == num_entrypoints) {
+        /* No JPEG Encode (VAEntrypointEncPicture) entry point found */
+        assert(0);
+    }
+    
+    /* 3. Query for the Render Target format supported */
+    attrib[0].type = VAConfigAttribRTFormat;
+    attrib[1].type = VAConfigAttribEncJPEG;
+    va_status = vaGetConfigAttributes(va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture, &attrib[0], 2);
+    CHECK_VASTATUS(va_status, "vaGetConfigAttributes");
+
+    // RT should be one of below.
+    if(!((attrib[0].value & VA_RT_FORMAT_YUV420) || (attrib[0].value & VA_RT_FORMAT_YUV422) 
+        ||(attrib[0].value & VA_RT_FORMAT_YUV444) || (attrib[0].value & VA_RT_FORMAT_YUV400))) 
+    {
+        /* Did not find the supported RT format */
+        assert(0);        
+    }
+
+    VAConfigAttribValEncJPEG jpeg_attrib_val;
+    jpeg_attrib_val.value = attrib[1].value;
+
+    /* Set JPEG profile attribs */
+    jpeg_attrib_val.bits.arithmatic_coding_mode = 0;
+    jpeg_attrib_val.bits.progressive_dct_mode = 0;
+    jpeg_attrib_val.bits.non_interleaved_mode = 1;
+    jpeg_attrib_val.bits.differential_mode = 0;
+
+    attrib[1].value = jpeg_attrib_val.value;
+    
+    /* 4. Create Config for the profile=VAProfileJPEGBaseline, entrypoint=VAEntrypointEncPicture,
+     * with RT format attribute */
+    va_status = vaCreateConfig(va_dpy, VAProfileJPEGBaseline, VAEntrypointEncPicture, 
+                               &attrib[0], 2, &config_id);
+    CHECK_VASTATUS(va_status, "vaCreateConfig");
+    
+    /* 5. Create Surface for the input picture */
+    va_status = vaCreateSurfaces(va_dpy, surface_type, picture_width, picture_height, 
+                                 &surface_id, 1, &fourcc, 1);
+    CHECK_VASTATUS(va_status, "vaCreateSurfaces");
+    
+    //Map the input yuv file to the input surface created with the surface_id
+    upload_yuv_to_surface(va_dpy, yuv_fp, surface_id, yuvComponent, picture_width, picture_height, frame_size);
+    
+    /* 6. Create Context for the encode pipe*/
+    va_status = vaCreateContext(va_dpy, config_id, picture_width, picture_height, 
+                                VA_PROGRESSIVE, &surface_id, 1, &context_id);
+    CHECK_VASTATUS(va_status, "vaCreateContext");
+
+    /* Create buffer for Encoded data to be stored */
+    va_status =  vaCreateBuffer(va_dpy, context_id, VAEncCodedBufferType,
+                                   frame_size, 1, NULL, &codedbuf_buf_id);
+    CHECK_VASTATUS(va_status,"vaCreateBuffer");
+    
+    //Initialize the picture parameter buffer
+    pic_param.coded_buf = codedbuf_buf_id;
+    jpegenc_pic_param_init(&pic_param, picture_width, picture_height, quality, yuvComponent);
+    
+    /* 7. Create buffer for the picture parameter */
+    va_status = vaCreateBuffer(va_dpy, context_id, VAEncPictureParameterBufferType,
+                               sizeof(VAEncPictureParameterBufferJPEG), 1, &pic_param, &pic_param_buf_id);
+    CHECK_VASTATUS(va_status,"vaCreateBuffer");
+    
+    //Load the QMatrix 
+    jpegenc_qmatrix_init(&quantization_param, yuvComponent);
+    
+    /* 8. Create buffer for Quantization Matrix */
+    va_status = vaCreateBuffer(va_dpy, context_id, VAQMatrixBufferType, 
+                               sizeof(VAQMatrixBufferJPEG), 1, &quantization_param, &qmatrix_buf_id);
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+    
+    //Load the Huffman Tables
+    jpegenc_hufftable_init(&hufftable_param, yuvComponent);
+    
+    /* 9. Create buffer for Huffman Tables */
+    va_status = vaCreateBuffer(va_dpy, context_id, VAHuffmanTableBufferType, 
+                               sizeof(VAHuffmanTableBufferJPEGBaseline), 1, &hufftable_param, &huffmantable_buf_id);
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+    
+    //Initialize the slice parameter buffer
+    jpegenc_slice_param_init(&slice_param, yuvComponent);
+    
+    /* 10. Create buffer for slice parameter */
+    va_status = vaCreateBuffer(va_dpy, context_id, VAEncSliceParameterBufferType, 
+                            sizeof(slice_param), 1, &slice_param, &slice_param_buf_id);
+    CHECK_VASTATUS(va_status, "vaCreateBuffer");
+
+    //Pack headers and send using Raw data buffer
+    VAEncPackedHeaderParameterBuffer packed_header_param_buffer;
+    unsigned int length_in_bits;
+    unsigned char *packed_header_buffer = NULL;
+
+    length_in_bits = build_packed_jpeg_header_buffer(&packed_header_buffer, yuvComponent, picture_width, picture_height, slice_param.restart_interval);
+    packed_header_param_buffer.type = VAEncPackedHeaderRawData;
+    packed_header_param_buffer.bit_length = length_in_bits;
+    packed_header_param_buffer.has_emulation_bytes = 0;
+    
+    /* 11. Create raw buffer for header */
+    va_status = vaCreateBuffer(va_dpy,
+                               context_id,
+                               VAEncPackedHeaderParameterBufferType,
+                               sizeof(packed_header_param_buffer), 1, &packed_header_param_buffer,
+                               &packed_raw_header_param_buf_id);
+    CHECK_VASTATUS(va_status,"vaCreateBuffer");
+
+    va_status = vaCreateBuffer(va_dpy,
+                               context_id,
+                               VAEncPackedHeaderDataBufferType,
+                               (length_in_bits + 7) / 8, 1, packed_header_buffer,
+                               &packed_raw_header_buf_id);
+    CHECK_VASTATUS(va_status,"vaCreateBuffer");
+    
+    /* 12. Begin picture */
+    va_status = vaBeginPicture(va_dpy, context_id, surface_id);
+    CHECK_VASTATUS(va_status, "vaBeginPicture");   
+
+    /* 13. Render picture for all the VA buffers created */
+    va_status = vaRenderPicture(va_dpy,context_id, &pic_param_buf_id, 1);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+   
+    va_status = vaRenderPicture(va_dpy,context_id, &qmatrix_buf_id, 1);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+
+    va_status = vaRenderPicture(va_dpy,context_id, &huffmantable_buf_id, 1);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+   
+    va_status = vaRenderPicture(va_dpy,context_id, &slice_param_buf_id, 1);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    
+    va_status = vaRenderPicture(va_dpy,context_id, &packed_raw_header_param_buf_id, 1);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    
+    va_status = vaRenderPicture(va_dpy,context_id, &packed_raw_header_buf_id, 1);
+    CHECK_VASTATUS(va_status, "vaRenderPicture");
+    
+    va_status = vaEndPicture(va_dpy,context_id);
+    CHECK_VASTATUS(va_status, "vaEndPicture");
+
+    if (writeToFile) {
+        VASurfaceStatus surface_status;
+        size_t w_items;
+        VACodedBufferSegment *coded_buffer_segment;
+        unsigned char *coded_mem;
+        int slice_data_length;
+
+        va_status = vaSyncSurface(va_dpy, surface_id);
+        CHECK_VASTATUS(va_status, "vaSyncSurface");
+    
+        surface_status = 0;
+        va_status = vaQuerySurfaceStatus(va_dpy, surface_id, &surface_status);
+        CHECK_VASTATUS(va_status,"vaQuerySurfaceStatus");
+
+        va_status = vaMapBuffer(va_dpy, codedbuf_buf_id, (void **)(&coded_buffer_segment));
+        CHECK_VASTATUS(va_status,"vaMapBuffer");
+
+        coded_mem = coded_buffer_segment->buf;
+
+        if (coded_buffer_segment->status & VA_CODED_BUF_STATUS_SLICE_OVERFLOW_MASK) {
+            vaUnmapBuffer(va_dpy, codedbuf_buf_id);
+            printf("ERROR......Coded buffer too small\n");
+            exit(1); //Do not write out a truncated bitstream
+        }
+
+
+        slice_data_length = coded_buffer_segment->size;
+
+        do {
+            w_items = fwrite(coded_mem, slice_data_length, 1, jpeg_fp);
+        } while (w_items != 1);
+
+        va_status = vaUnmapBuffer(va_dpy, codedbuf_buf_id);
+        CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+    }
+       
+    vaDestroyBuffer(va_dpy, pic_param_buf_id);
+    vaDestroyBuffer(va_dpy, qmatrix_buf_id);
+    vaDestroyBuffer(va_dpy, slice_param_buf_id);
+    vaDestroyBuffer(va_dpy, huffmantable_buf_id);
+    vaDestroyBuffer(va_dpy, codedbuf_buf_id);
+    vaDestroyBuffer(va_dpy, packed_raw_header_param_buf_id);
+    vaDestroyBuffer(va_dpy, packed_raw_header_buf_id);
+    vaDestroySurfaces(va_dpy,&surface_id,1);
+    vaDestroyContext(va_dpy,context_id);
+    vaDestroyConfig(va_dpy,config_id);
+    vaTerminate(va_dpy);
+    va_close_display(va_dpy);
+
+    return 0;
+}
+
+
+int main(int argc, char *argv[])
+{
+    FILE *yuv_fp;
+    FILE *jpeg_fp;
+    off_t file_size;
+    clock_t start_time, finish_time;
+    unsigned int duration;
+    unsigned int yuv_type = 0;
+    int quality = 0;
+    unsigned int picture_width = 0;
+    unsigned int picture_height = 0;
+    unsigned int frame_size = 0;
+    
+    va_init_display_args(&argc, argv);
+    
+    if(argc != 7) {
+        show_help();
+        return -1;
+    }
+    
+    picture_width = atoi(argv[1]);
+    picture_height = atoi(argv[2]);
+    yuv_type = atoi(argv[5]);
+    quality = atoi(argv[6]);
+    
+    yuv_fp = fopen(argv[3],"rb");
+    if ( yuv_fp == NULL){
+        printf("Can't open input YUV file\n");
+        return -1;
+    }
+    
+    fseeko(yuv_fp, (off_t)0, SEEK_END);
+    file_size = ftello(yuv_fp);
+    
+    //<input file type: 0(I420)/1(NV12)/2(UYVY)/3(YUY2)/4(Y8)/5(RGB)>     
+    switch(yuv_type)
+    {
+        case 0 :   //I420 
+        case 1 : { //NV12
+            frame_size = picture_width * picture_height +  ((picture_width * picture_height) >> 1) ; 
+            break;
+        }
+        
+        case 2:  //UYVY
+        case 3: { //YUY2
+           frame_size = 2 * (picture_width * picture_height); 
+           break;
+        }
+        
+        case 4: { //Y8
+            frame_size = picture_width * picture_height;
+            break;
+        }
+        
+        case 5: { //RGB or YUV444
+            frame_size = 3 * (picture_width * picture_height) ;
+            break;
+        }
+        
+        default: {
+            printf("Unsupported input format\n");
+            show_help();
+            fclose(yuv_fp);
+            return -1;
+        }
+        
+    }
+    
+    if ( (file_size < frame_size) || (file_size % frame_size) ) {
+        fclose(yuv_fp);
+        printf("The YUV file's size is not correct\n");
+        return -1;
+    }
+    
+    fseeko(yuv_fp, (off_t)0, SEEK_SET);
+
+    jpeg_fp = fopen(argv[4], "wb");  
+    if ( jpeg_fp == NULL) {
+        fclose(yuv_fp);
+        printf("Can't open output destination jpeg file\n");
+        return -1;
+    }   
+        
+    start_time = clock();
+    encode_input_image(yuv_fp, jpeg_fp, picture_width, picture_height, frame_size, yuv_type, quality);
+    if(yuv_fp != NULL) fclose(yuv_fp);
+    if(jpeg_fp != NULL) fclose(jpeg_fp);
+    finish_time = clock();
+    duration = finish_time - start_time;
+    printf("Encoding finished in %u ticks\n", duration);
+    
+    return 0;   
+}
+
diff --git a/test/encode/jpegenc_utils.h b/test/encode/jpegenc_utils.h
new file mode 100644
index 0000000..ce6a476
--- /dev/null
+++ b/test/encode/jpegenc_utils.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * This file is a utilities file which supports JPEG Encode process
+ */ 
+
+#include <sys/types.h>
+#include <stdio.h>
+
+#define MAX_JPEG_COMPONENTS 3 //only for Y, U and V
+#define JPEG_Y 0
+#define JPEG_U 1
+#define JPEG_V 2
+#define NUM_QUANT_ELEMENTS 64
+#define NUM_MAX_HUFFTABLE 2
+#define NUM_AC_RUN_SIZE_BITS 16
+#define NUM_AC_CODE_WORDS_HUFFVAL 162
+#define NUM_DC_RUN_SIZE_BITS 16
+#define NUM_DC_CODE_WORDS_HUFFVAL 12
+
+#define BITSTREAM_ALLOCATE_STEPPING     4096
+
+struct __bitstream {
+    unsigned int *buffer;
+    int bit_offset;
+    int max_size_in_dword;
+};
+
+typedef struct __bitstream bitstream;
+
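+//Reverse the byte order of a 32-bit value (JPEG headers are big-endian, IA is little-endian)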
+static unsigned int 
+swap32(unsigned int val)
+{
+    unsigned char *pval = (unsigned char *)&val;
+
+    return ((pval[0] << 24)     |
+            (pval[1] << 16)     |
+            (pval[2] << 8)      |
+            (pval[3] << 0));
+}
+
+static void
+bitstream_start(bitstream *bs)
+{
+    bs->max_size_in_dword = BITSTREAM_ALLOCATE_STEPPING;
+    bs->buffer = calloc(bs->max_size_in_dword * sizeof(int), 1);
+    bs->bit_offset = 0;
+}
+
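+//Left-align and byte-swap the final partially filled dword so the stream ends as valid big-endian data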
+static void
+bitstream_end(bitstream *bs)
+{
+    int pos = (bs->bit_offset >> 5);
+    int bit_offset = (bs->bit_offset & 0x1f);
+    int bit_left = 32 - bit_offset;
+
+    if (bit_offset) {
+        bs->buffer[pos] = swap32((bs->buffer[pos] << bit_left));
+    }
+}
+ 
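+//Append the low size_in_bits bits of val to the stream. Bits are collected in
+//32-bit words that are byte-swapped to big-endian as each word fills up.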
+static void
+bitstream_put_ui(bitstream *bs, unsigned int val, int size_in_bits)
+{
+    int pos = (bs->bit_offset >> 5);
+    int bit_offset = (bs->bit_offset & 0x1f);
+    int bit_left = 32 - bit_offset;
+
+    if (!size_in_bits)
+        return;
+
+    if (size_in_bits < 32)
+        val &= ((1 << size_in_bits) - 1);
+
+    bs->bit_offset += size_in_bits;
+
+    if (bit_left > size_in_bits) {
+        bs->buffer[pos] = (bs->buffer[pos] << size_in_bits | val);
+    } else {
+        size_in_bits -= bit_left;
+        bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits);
+        bs->buffer[pos] = swap32(bs->buffer[pos]);
+
+        if (pos + 1 == bs->max_size_in_dword) {
+            bs->max_size_in_dword += BITSTREAM_ALLOCATE_STEPPING;
+            bs->buffer = realloc(bs->buffer, bs->max_size_in_dword * sizeof(unsigned int));
+        }
+
+        bs->buffer[pos + 1] = val;
+    }
+}
+
+//Marker values assigned as per the JPEG spec ISO/IEC 10918-1
+enum jpeg_markers {
+
+ //Define a JPEG marker as 0xFFXX when the value is written directly to the buffer,
+ //and as 0xXXFF when the marker is assigned to a structure field.
+ //This is needed because of the little-endianness of IA.
+
+ SOI  = 0xFFD8, //Start of Image 
+ EOI  = 0xFFD9, //End of Image
+ SOS  = 0xFFDA, //Start of Scan
+ DQT  = 0xFFDB, //Define Quantization Table
+ DRI  = 0xFFDD, //Define restart interval
+ RST0 = 0xFFD0, //Restart interval termination
+ DHT  = 0xFFC4, //Huffman table
+ SOF0 = 0xFFC0, //Baseline DCT   
+ APP0 = 0xFFE0, //Application Segment
+ COM  = 0xFFFE  //Comment segment    
+};
+
+typedef struct _JPEGFrameHeader {
+    
+    uint16_t SOF;    //Start of Frame Header
+    uint16_t Lf;     //Length of Frame Header
+    uint8_t  P;      //Sample precision
+    uint16_t Y;      //Number of lines
+    uint16_t X;      //Number of samples per line
+    uint8_t  Nf;     //Number of image components in frame
+    
+    struct _JPEGComponent {        
+        uint8_t Ci;    //Component identifier
+        uint8_t Hi:4;  //Horizontal sampling factor
+        uint8_t Vi:4;  //Vertical sampling factor
+        uint8_t Tqi;   //Quantization table destination selector        
+    } JPEGComponent[MAX_JPEG_COMPONENTS];
+    
+} JPEGFrameHeader;
+
+
+typedef struct _JPEGScanHeader {
+    
+    uint16_t SOS;  //Start of Scan
+    uint16_t Ls;   //Length of Scan
+    uint8_t  Ns;   //Number of image components in the scan
+        
+    struct _ScanComponent {
+        uint8_t Csj;   //Scan component selector
+        uint8_t Tdj:4; //DC Entropy coding table destination selector(Tdj:4 bits) 
+        uint8_t Taj:4; //AC Entropy coding table destination selector(Taj:4 bits)       
+    } ScanComponent[MAX_JPEG_COMPONENTS];
+    
+    uint8_t Ss;    //Start of spectral or predictor selection, 0 for Baseline
+    uint8_t Se;    //End of spectral or predictor selection, 63 for Baseline
+    uint8_t Ah:4;  //Successive approximation bit position high, 0 for Baseline
+    uint8_t Al:4;  //Successive approximation bit position low, 0 for Baseline
+    
+} JPEGScanHeader;
+
+
+typedef struct _JPEGQuantSection {
+    
+    uint16_t DQT;    //Quantization table marker
+    uint16_t Lq;     //Length of Quantization table definition
+    uint8_t  Tq:4;   //Quantization table destination identifier
+    uint8_t  Pq:4;   //Quantization table precision. Should be 0 for 8-bit samples
+    uint8_t  Qk[NUM_QUANT_ELEMENTS]; //Quantization table elements    
+    
+} JPEGQuantSection;
+
+typedef struct _JPEGHuffSection {
+    
+        uint16_t DHT;                            //Huffman table marker
+        uint16_t Lh;                             //Huffman table definition length
+        uint8_t  Tc:4;                           //Table class- 0:DC, 1:AC
+        uint8_t  Th:4;                           //Huffman table destination identifier
+        uint8_t  Li[NUM_AC_RUN_SIZE_BITS];       //Number of Huffman codes of length i
+        uint8_t  Vij[NUM_AC_CODE_WORDS_HUFFVAL]; //Value associated with each Huffman code
+    
+} JPEGHuffSection;
+
+
+typedef struct _JPEGRestartSection {
+    
+    uint16_t DRI;  //Restart interval marker
+    uint16_t Lr;   //Length of restart interval segment
+    uint16_t Ri;   //Restart interval
+    
+} JPEGRestartSection;
+
+
+typedef struct _JPEGCommentSection {
+    
+    uint16_t COM;  //Comment marker
+    uint16_t Lc;   //Comment segment length
+    uint8_t  Cmi;  //Comment byte
+    
+} JPEGCommentSection;
+
+
+typedef struct _JPEGAppSection {
+    
+    uint16_t APPn;  //Application data marker
+    uint16_t Lp;    //Application data segment length
+    uint8_t  Api;   //Application data byte
+    
+} JPEGAppSection;
+
+//Luminance quantization table
+//Source: Jpeg Spec ISO/IEC 10918-1, Annex K, Table K.1
+uint8_t jpeg_luma_quant[NUM_QUANT_ELEMENTS] = {
+    16, 11, 10, 16, 24,  40,  51,  61,
+    12, 12, 14, 19, 26,  58,  60,  55,
+    14, 13, 16, 24, 40,  57,  69,  56,
+    14, 17, 22, 29, 51,  87,  80,  62,
+    18, 22, 37, 56, 68,  109, 103, 77,
+    24, 35, 55, 64, 81,  104, 113, 92,
+    49, 64, 78, 87, 103, 121, 120, 101,
+    72, 92, 95, 98, 112, 100, 103, 99    
+};
+
+//Chrominance quantization table
+//Source: Jpeg Spec ISO/IEC 10918-1, Annex K, Table K.2
+uint8_t jpeg_chroma_quant[NUM_QUANT_ELEMENTS] = {
+    17, 18, 24, 47, 99, 99, 99, 99,
+    18, 21, 26, 66, 99, 99, 99, 99,
+    24, 26, 56, 99, 99, 99, 99, 99,
+    47, 66, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99,
+    99, 99, 99, 99, 99, 99, 99, 99
+};
+
+
+//Zigzag scan order of the Luma and Chroma components
+//Note: Jpeg Spec ISO/IEC 10918-1, Figure A.6 shows the zigzag order differently.
+//The Spec illustrates the zigzag pattern with number positions. The table below follows
+//the pattern shown in A.6 and maps the position of the elements in the array.
+uint8_t jpeg_zigzag[] = {
+    0,   1,   8,   16,  9,   2,   3,   10,
+    17,  24,  32,  25,  18,  11,  4,   5,
+    12,  19,  26,  33,  40,  48,  41,  34,
+    27,  20,  13,  6,   7,   14,  21,  28,
+    35,  42,  49,  56,  57,  50,  43,  36,
+    29,  22,  15,  23,  30,  37,  44,  51,
+    58,  59,  52,  45,  38,  31,  39,  46,
+    53,  60,  61,  54,  47,  55,  62,  63
+};
+
+
+//Huffman table for Luminance DC Coefficients
+//Reference Jpeg Spec ISO/IEC 10918-1, K.3.3.1
+//K.3.3.1 is the summarized version of Table K.3
+uint8_t jpeg_hufftable_luma_dc[] = {
+    //TcTh (Tc=0 since 0:DC, 1:AC; Th=0)
+    0x00,
+    //Li
+    0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    //Vi
+    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B
+};
+
+//Huffman table for Chrominance DC Coefficients
+//Reference Jpeg Spec ISO/IEC 10918-1, K.3.3.1
+//K.3.3.1 is the summarized version of Table K.4
+uint8_t jpeg_hufftable_chroma_dc[] = {
+    //TcTh (Tc=0 since 0:DC, 1:AC; Th=1)
+    0x01,
+    //Li
+    0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+    //Vi
+    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B    
+};
+
+
+//Huffman table for Luminance AC Coefficients
+//Reference Jpeg Spec ISO/IEC 10918-1, K.3.3.2
+//K.3.3.2 is the summarized version of Table K.5
+uint8_t jpeg_hufftable_luma_ac[] = {
+    //TcTh (Tc=1 since 0:DC, 1:AC; Th=0)
+    0x10,
+    //Li
+    0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D,
+    //Vi
+    0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 
+    0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 
+    0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28,
+    0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 
+    0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 
+    0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+    0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 
+    0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 
+    0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2,
+    0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 
+    0xF9, 0xFA
+};
+
+//Huffman table for Chrominance AC Coefficients
+//Reference Jpeg Spec ISO/IEC 10918-1, K.3.3.2
+//K.3.3.2 is the summarized version of Table K.6
+uint8_t jpeg_hufftable_chroma_ac[] = {
+    //TcTh (Tc=1 since 0:DC, 1:AC; Th=1)
+    0x11,
+    //Li
+    0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+    //Vi
+    0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 
+    0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 
+    0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26,
+    0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 
+    0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 
+    0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+    0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 
+    0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 
+    0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA,
+    0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 
+    0xF9, 0xFA
+};
+
+typedef struct _YUVComponentSpecs {
+    //One of 0(I420)/1(NV12)/2(UYVY)/3(YUY2)/4(Y8)/5(RGB)
+    unsigned int yuv_type;
+    // One of VA_RT_FORMAT_YUV420, VA_RT_FORMAT_YUV422, VA_RT_FORMAT_YUV400, VA_RT_FORMAT_YUV444
+    unsigned int va_surface_format;
+    //One of VA_FOURCC_I420, VA_FOURCC_NV12, VA_FOURCC_UYVY, VA_FOURCC_YUY2, VA_FOURCC_Y800, VA_FOURCC_444P
+    unsigned int fourcc_val; //Using this field to evaluate the input file type.
+    //Number of components
+    unsigned int num_components;
+    //Y horizontal subsample
+    unsigned int y_h_subsample;
+    //Y vertical subsample
+    unsigned int y_v_subsample;
+    //U horizontal subsample
+    unsigned int u_h_subsample;
+    //U vertical subsample
+    unsigned int u_v_subsample;
+    //V horizontal subsample
+    unsigned int v_h_subsample;
+    //V vertical subsample
+    unsigned int v_v_subsample;    
+} YUVComponentSpecs;
-- 
2.1.0


