Hi,
Some people have been asking for a C code example for their application. I was stuck on the same problem for a few weeks, so I will give you a sample of my code. My code was so horrible (lots of code left in comments) that I had to cut some parts (callbacks such as the handoff signal and typefind signal handlers).

So it is just an example: you will have to write the bus function yourself (see the docs), and also the pad-added callback; rough sketches of both are appended after the pipeline code below. With this structure I was able to grab video and audio frames and to synchronise the audio and video buffers manually (after one audio and one video frame, each thread stops to let the other thread process the same frame).
Hope this helps you a bit,
Erwan Masson

  /* Main pipeline */
  pipeline = gst_pipeline_new ("Main pipeline");

  source = gst_element_factory_make ("filesrc", "file-source");
  /* The parser has 2 dynamic output pads; you will have to link them to your
   * audio thread and video thread */
  parser = gst_element_factory_make ("decodebin", "decodebin-parser");

  /* Audio pipeline */
  pipeAudio = gst_pipeline_new ("audio-player");
  /* A queue is needed to synchronise with the video thread */
  aqueue = gst_element_factory_make ("queue", "aqueue");
  decoder = gst_element_factory_make ("identity", "identity-decoder-audio");
  conv = gst_element_factory_make ("audioconvert", "converteraudio");
  /* identity is useful for adding a handoff signal (to grab a sample) */
  aconv2 = gst_element_factory_make ("identity", "identity-aconv2");
  /* With typefind you are able to retrieve some info from the stream */
  afind = gst_element_factory_make ("typefind", "typefindaudio");
  sink = gst_element_factory_make ("alsasink", "alsa-output");

  /* Video pipeline */
  pipeVideo = gst_pipeline_new ("video-player");
  /* A queue is needed to synchronise with the audio thread */
  vqueue = gst_element_factory_make ("queue", "vqueue");
  vdecoder = gst_element_factory_make ("identity", "identity-decoder");
  vconv = gst_element_factory_make ("ffmpegcolorspace", "convertervideo");
  /* Use a capsfilter if you want to convert to RGB (the default ffmpeg output
   * is YUV) */
  vcapsfilter = gst_element_factory_make ("capsfilter", "restrict-caps");
  g_object_set (G_OBJECT (vcapsfilter), "caps",
      gst_caps_new_simple ("video/x-raw-rgb",
          "bpp", G_TYPE_INT, 32,
          "depth", G_TYPE_INT, 32,
          NULL),
      NULL);
  /* Put a handoff signal on identity and you will grab the video frames */
  vconv2 = gst_element_factory_make ("identity", "identity-vconv2");
  g_signal_connect (vconv2, "handoff", G_CALLBACK (cb_handoff_video), NULL);
  /* Use typefind if you want to grab some info on the video, like width, height, ... */
  vfind = gst_element_factory_make ("typefind", "typefindVideo2");
  vsink = gst_element_factory_make ("fakesink", "video-fake-output");
  /* You need to test every element to see whether it was created */
  if (!pipeline || !source || !parser || !decoder || !conv || !sink) {
    g_print ("One element could not be created 1\n");
    if (!decoder)
      g_print ("decoder\n");
    if (!parser)
      g_print ("parser\n");
    if (!source)
      g_print ("source\n");
    if (!conv)
      g_print ("conv\n");
    if (!pipeline)
      g_print ("pipeline\n");
    if (!sink)
      g_print ("sink\n");
    return -1;
  }
  if (!vqueue || !aqueue || !vdecoder || !vconv || !vsink) {
    g_print ("One element could not be created 2\n");
    return -1;
  }

  g_object_set (G_OBJECT (source), "location", "myFileName.avi", NULL);

  /* Add a bus watch to catch messages */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  gst_bus_add_watch (bus, cb_bus_call, loop);
  gst_object_unref (bus);
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeVideo));
  gst_bus_add_watch (bus, cb_bus_call, loop);
  gst_object_unref (bus);
  /* Video pipeline: add the elements, then link them */
  gst_bin_add_many (GST_BIN (pipeVideo), vqueue, vdecoder, vconv, vcapsfilter,
      vconv2, vfind, vsink, NULL);
  gst_element_link_many (vqueue, vdecoder, vconv, vcapsfilter, vconv2, vfind,
      vsink, NULL);
  /* Set the ghost pad for the video pipeline (its input pad) */
  pad = gst_element_get_pad (vqueue, "sink");
  gst_element_add_pad (pipeVideo, gst_ghost_pad_new ("sink", pad));
  gst_object_unref (GST_OBJECT (pad));

  /* Audio pipeline */
  gst_bin_add_many (GST_BIN (pipeAudio), aqueue, decoder, conv, aconv2, afind,
      sink, NULL);
  gst_element_link_many (aqueue, decoder, conv, aconv2, afind, sink, NULL);
  pad = gst_element_get_pad (aqueue, "sink");
  gst_element_add_pad (pipeAudio, gst_ghost_pad_new ("sink", pad));
  gst_object_unref (GST_OBJECT (pad));

  /* Main pipeline */
  gst_bin_add_many (GST_BIN (pipeline), source, parser, NULL);
  gst_element_link (source, parser);

  /* Note that we cannot link the parser and the decoder branches yet, because
   * the parser uses dynamic pads. For that, we set a pad-added signal handler. */
  g_signal_connect (parser, "pad-added", G_CALLBACK (cb_new_pad), NULL);

  /* Now set to playing and iterate */
  g_print ("Setting to PLAYING\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  g_print ("Running\n");
  g_main_loop_run (loop);
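Here is a rough sketch of the pad-added callback, just so you see the idea. I am assuming pipeAudio and pipeVideo are visible from the callback (for example as globals, or passed through the user data pointer instead of NULL); each new decodebin pad is linked to the ghost "sink" pad of the matching branch by looking at its caps:

static void
cb_new_pad (GstElement *element, GstPad *pad, gpointer data)
{
  GstCaps *caps;
  GstStructure *str;
  GstPad *targetsink;

  /* Look at the caps of the new pad to decide where it should go */
  caps = gst_pad_get_caps (pad);
  str = gst_caps_get_structure (caps, 0);

  if (g_strrstr (gst_structure_get_name (str), "audio"))
    targetsink = gst_element_get_pad (pipeAudio, "sink");   /* audio ghost pad */
  else
    targetsink = gst_element_get_pad (pipeVideo, "sink");   /* video ghost pad */

  gst_pad_link (pad, targetsink);

  gst_object_unref (targetsink);
  gst_caps_unref (caps);
}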
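The bus function can be the usual one from the GStreamer documentation; the main loop is what gets passed as user data in gst_bus_add_watch (bus, cb_bus_call, loop) above:

static gboolean
cb_bus_call (GstBus *bus, GstMessage *msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;

  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;

      gst_message_parse_error (msg, &error, &debug);
      g_free (debug);

      g_printerr ("Error: %s\n", error->message);
      g_error_free (error);

      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }

  /* Returning TRUE keeps the watch installed */
  return TRUE;
}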
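And a minimal version of the handoff callback that grabs the video frames (GStreamer 0.10 buffer API; the manual audio/video synchronisation I mentioned would go in here, I left it out):

static void
cb_handoff_video (GstElement *identity, GstBuffer *buffer, gpointer data)
{
  /* Called for every buffer (frame) flowing through the identity element */
  g_print ("video frame: %u bytes, timestamp %" GST_TIME_FORMAT "\n",
      GST_BUFFER_SIZE (buffer),
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));

  /* The raw frame bytes are at GST_BUFFER_DATA (buffer): copy them or hand
   * them to your own code, and do the per-frame synchronisation with the
   * audio thread at this point */
}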
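Finally, if you want info like width/height from typefind, connect a handler to its "have-type" signal, e.g. g_signal_connect (vfind, "have-type", G_CALLBACK (cb_have_type_video), NULL); cb_have_type_video is just an example name, something like:

static void
cb_have_type_video (GstElement *typefind, guint probability, GstCaps *caps,
    gpointer data)
{
  GstStructure *str = gst_caps_get_structure (caps, 0);
  gint width, height;

  /* Read the size out of the reported caps, if present */
  if (gst_structure_get_int (str, "width", &width) &&
      gst_structure_get_int (str, "height", &height))
    g_print ("video caps: %dx%d\n", width, height);
}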