[gst-devel] GStreamer / C example

Erwan Masson masson.erwan1 at gmail.com
Mon Apr 30 14:39:38 CEST 2007


Hi,
 Some people have been asking for a C code example for their application. I had
the same problem a few weeks ago, so I will give you a sample of my code. My code
was so messy (lots of commented-out code) that I had to cut some parts
(the handoff-signal and typefind-signal callbacks).
So it is just an example: you will have to write the bus function yourself
(see the docs), and also the pad-added callback; minimal sketches of these
callbacks are given after the listing.
With that structure, I was able to grab video and audio frames and to
synchronise the audio and video buffers manually (after each audio or video
frame, it stops to let the other thread handle the same frame).
Hope this helps you a bit,
Erwan Masson

/* Main pipeline */
  pipeline = gst_pipeline_new ("Main pipeline");
  source = gst_element_factory_make ("filesrc", "file-source");
  /* The parser has 2 dynamic output pads; you will have to link them to
   * your audio thread and video thread */
  parser = gst_element_factory_make ("decodebin", "decodebin-parser");

  /* Audio pipeline */
  pipeAudio = gst_pipeline_new ("audio-player");
  /* A queue is needed to synchronise with the video thread */
  aqueue = gst_element_factory_make ("queue", "aqueue");
  decoder = gst_element_factory_make ("identity", "identity-decoder-audio");
  conv = gst_element_factory_make ("audioconvert", "converteraudio");
  /* Identity, useful for adding a handoff signal (to grab a sample) */
  aconv2 = gst_element_factory_make ("identity", "identity-aconv2");
  /* With typefind you can retrieve some info about the stream */
  afind = gst_element_factory_make ("typefind", "typefindaudio");
  sink = gst_element_factory_make ("alsasink", "alsa-output");

  /* Video pipeline */
  pipeVideo = gst_pipeline_new ("video-player");
  /* A queue is useful to synchronise with the audio thread */
  vqueue = gst_element_factory_make ("queue", "vqueue");
  vdecoder = gst_element_factory_make ("identity", "identity-decoder");
  vconv = gst_element_factory_make ("ffmpegcolorspace", "convertervideo");
  /* Use a capsfilter if you want to convert to RGB (the default
   * ffmpegcolorspace output is YUV) */
  vcapsfilter = gst_element_factory_make ("capsfilter", "restrict-caps");
  g_object_set (G_OBJECT (vcapsfilter), "caps",
      gst_caps_new_simple ("video/x-raw-rgb",
          "bpp", G_TYPE_INT, 32,
          "depth", G_TYPE_INT, 32,
          NULL),
      NULL);
  /* Put a handoff signal on an identity element and you can grab video frames */
  vconv2 = gst_element_factory_make ("identity", "identity-vconv2");
  g_signal_connect (vconv2, "handoff", G_CALLBACK (cb_handoff_video), NULL);
  /* Use typefind if you want to grab some info about the video,
   * like width, height, ... */
  vfind = gst_element_factory_make ("typefind", "typefindVideo2");
  vsink = gst_element_factory_make ("fakesink", "video-fake-output");


  /* You need to check every element to see if it was created */
  if (!pipeline || !source || !parser || !decoder || !conv || !sink) {
    g_print ("One element could not be created (1)\n");
    if (!decoder)
      g_print ("decoder\n");
    if (!parser)
      g_print ("parser\n");
    if (!source)
      g_print ("source\n");
    if (!conv)
      g_print ("conv\n");
    if (!pipeline)
      g_print ("pipeline\n");
    if (!sink)
      g_print ("sink\n");
    return -1;
  }
  if (!vqueue || !aqueue || !vdecoder || !vconv || !vsink) {
    g_print ("One element could not be created (2)\n");
    return -1;
  }


  g_object_set (G_OBJECT (source), "location", "myFileName.avi", NULL);

  /* Add a bus watch to catch messages */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  gst_bus_add_watch (bus, cb_bus_call, loop);
  gst_object_unref (bus);
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeVideo));
  gst_bus_add_watch (bus, cb_bus_call, loop);
  gst_object_unref (bus);


  /* Video pipeline */
  /* Add the elements to the pipeline */
  gst_bin_add_many (GST_BIN (pipeVideo), vqueue, vdecoder, vconv,
      vcapsfilter, vconv2, vfind, vsink, NULL);
  /* Link the elements in the pipeline */
  gst_element_link_many (vqueue, vdecoder, vconv, vcapsfilter, vconv2,
      vfind, vsink, NULL);
  /* Set the ghost pad for the video pipeline (its input pad) */
  pad = gst_element_get_pad (vqueue, "sink");
  gst_element_add_pad (pipeVideo, gst_ghost_pad_new ("sink", pad));
  gst_object_unref (GST_OBJECT (pad));

  /* Audio pipeline */
  gst_bin_add_many (GST_BIN (pipeAudio), aqueue, decoder, conv, aconv2,
      afind, sink, NULL);
  gst_element_link_many (aqueue, decoder, conv, aconv2, afind, sink, NULL);
  pad = gst_element_get_pad (aqueue, "sink");
  gst_element_add_pad (pipeAudio, gst_ghost_pad_new ("sink", pad));
  gst_object_unref (GST_OBJECT (pad));

  /* Main pipeline */
  gst_bin_add_many (GST_BIN (pipeline), source, parser, NULL);
  gst_element_link (source, parser);

  /* Link together - note that we cannot link the parser and the
   * decoder yet, because the parser uses dynamic pads.  For that,
   * we set a pad-added signal handler. */
  g_signal_connect (parser, "pad-added", G_CALLBACK (cb_new_pad), NULL);


  /* Now set to playing and iterate. */
  g_print ("Setting to PLAYING\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  g_print ("Running\n");
  g_main_loop_run (loop);

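For the bus, a minimal cb_bus_call can look like this (just a sketch that quits the main loop on EOS or error; adapt it to your needs):

static gboolean
cb_bus_call (GstBus *bus, GstMessage *msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;

  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR: {
      GError *err = NULL;
      gchar *debug = NULL;

      gst_message_parse_error (msg, &err, &debug);
      g_print ("Error: %s\n", err->message);
      g_error_free (err);
      g_free (debug);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  /* returning TRUE keeps the watch installed */
  return TRUE;
}
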
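For the pad-added callback, you look at the caps of the new decodebin pad and link it to the ghost "sink" pad of the audio or the video pipeline. A minimal sketch (it assumes pipeAudio and pipeVideo are globals, since the g_signal_connect above passes NULL as user data):

static void
cb_new_pad (GstElement *element, GstPad *pad, gpointer data)
{
  GstCaps *caps;
  GstStructure *str;
  const gchar *name;
  GstElement *target;
  GstPad *sinkpad;

  /* look at the caps of the new pad to decide where it goes */
  caps = gst_pad_get_caps (pad);
  str = gst_caps_get_structure (caps, 0);
  name = gst_structure_get_name (str);

  target = g_str_has_prefix (name, "audio") ? pipeAudio : pipeVideo;

  /* link the new pad to the ghost "sink" pad of the chosen pipeline */
  sinkpad = gst_element_get_pad (target, "sink");
  if (!GST_PAD_IS_LINKED (sinkpad))
    gst_pad_link (pad, sinkpad);

  gst_object_unref (sinkpad);
  gst_caps_unref (caps);
}
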
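The handoff callback connected to the identity element (vconv2) is where you actually receive each raw RGB frame. A minimal sketch, using the 0.10 buffer macros:

static void
cb_handoff_video (GstElement *identity, GstBuffer *buffer, gpointer data)
{
  guint8 *pixels = GST_BUFFER_DATA (buffer);
  guint size = GST_BUFFER_SIZE (buffer);

  /* here you can copy the frame or hand it to your other thread */
  g_print ("Got a video frame: %u bytes at %p\n", size, pixels);
}
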
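If you also want the typefind info (width, height, ...), you can connect the "have-type" signal on vfind (and/or afind) and read the caps in the callback. Another minimal sketch, assuming a connection like g_signal_connect (vfind, "have-type", G_CALLBACK (cb_typefound), NULL):

static void
cb_typefound (GstElement *typefind, guint probability, GstCaps *caps,
    gpointer data)
{
  GstStructure *str = gst_caps_get_structure (caps, 0);
  gint width = 0, height = 0;

  gst_structure_get_int (str, "width", &width);
  gst_structure_get_int (str, "height", &height);
  g_print ("Found type %s (%dx%d)\n", gst_structure_get_name (str),
      width, height);
}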