How to extract video frames from pipeline through gstreamer-1.0???

Faran m.faran.majeed at gmail.com
Sun Jun 28 07:49:02 PDT 2015


I've written the following code to extract video frames from a pipeline, but I'm now stuck.
Could anyone please help?

#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <glib.h>

/* Bus watch callback: quits the main loop when the pipeline reports
 * end-of-stream or an error.  Returning TRUE keeps the watch installed. */
static gboolean
bus_call (GstBus     *bus,
          GstMessage *msg,
          gpointer    data)
{
  GMainLoop *main_loop = (GMainLoop *) data;

  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (main_loop);
      break;

    case GST_MESSAGE_ERROR: {
      GError *err = NULL;
      gchar  *dbg = NULL;

      /* Parse out the error; the debug string is not printed here,
       * so release it immediately. */
      gst_message_parse_error (msg, &err, &dbg);
      g_free (dbg);

      g_printerr ("Error: %s\n", err->message);
      g_error_free (err);

      g_main_loop_quit (main_loop);
      break;
    }

    default:
      /* All other message types are ignored. */
      break;
  }

  return TRUE;
}
/* Diagnostic "pad-added" callback: only announces that a pad appeared. */
static void
sink_pad_add (GstElement *element, GstPad *added_pad, gpointer user_data)
{
  g_print("\n This method is called when sink is padded \n");
}

/*
 * "pad-added" handler for the demuxer.  Links the newly created demuxer
 * source pad to the decoder's sink pad (passed via @data).
 *
 * Fixes over the original: the unused sink_pad_video local is gone, the
 * static pad is NULL-checked, an already-linked sink pad (e.g. when the
 * demuxer exposes a second stream) is skipped instead of re-linked, and
 * a failed gst_pad_link() is reported instead of silently ignored.
 */
static void
on_pad_added (GstElement *src, GstPad *new_pad, gpointer data)
{
  GstElement *decoder = (GstElement *) data;
  GstPad *sink_pad;
  GstPadLinkReturn link_ret;

  g_print ("Dynamic pad created, linking demuxer/decoder\n");

  sink_pad = gst_element_get_static_pad (decoder, "sink");
  if (sink_pad == NULL) {
    g_printerr ("Decoder has no static \"sink\" pad\n");
    return;
  }

  if (gst_pad_is_linked (sink_pad)) {
    /* A previous pad (e.g. the video stream) already claimed the decoder. */
    gst_object_unref (sink_pad);
    return;
  }

  link_ret = gst_pad_link (new_pad, sink_pad);
  if (GST_PAD_LINK_FAILED (link_ret))
    g_printerr ("Failed to link demuxer pad to decoder (code %d)\n", link_ret);

  gst_object_unref (sink_pad);
}
/* Placeholder hook for per-frame processing; currently just logs. */
static void
image_processing (void)
{
  g_print("image processing method is called");
}


int
main (int   argc,
      char *argv[])
{
  GMainLoop *loop;

  GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink;
  GstElement *video_decoder, *video_conv, *video_sink;
  GstBus *bus;
  guint bus_watch_id;
   GstElementFactory *factory;
  /* Initialisation */
  gst_init (&argc, &argv);

  loop = g_main_loop_new (NULL, FALSE);


  /* Check input arguments */
  if (argc != 2) {
    g_printerr ("Usage: %s <Ogg/Vorbis filename>\n", argv[0]);
    return -1;
  }


  /* Create gstreamer elements */
  pipeline = gst_pipeline_new ("audio-player");
  source   = gst_element_factory_make ("filesrc", "source");
 
  demuxer  = gst_element_factory_make ("oggdemux",      "ogg-demuxer");
   video_decoder  = gst_element_factory_make ("theoradec","video_decoder");
  video_conv     = gst_element_factory_make
("videoconvert","video_convert");
  video_sink     = gst_element_factory_make ("autovideosink","sink");
  if(!demuxer){g_print("qtdemuxer is not created");}

  if(!video_conv){g_print("video converter is not created");}
  

  if (!pipeline || !source || !demuxer || !decoder || !conv || !sink ||
!video_decoder || !video_conv || !video_sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* Set up the pipeline */

  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* we add all elements into the pipeline */
  /* file-source | ogg-demuxer | vorbis-decoder | converter | alsa-output */
  //gst_bin_add_many (GST_BIN (pipeline),  source, demuxer, decoder, conv,
sink /*,video_decoder, video_conv, video_sink*/, NULL);

  gst_bin_add_many (GST_BIN (pipeline), source, demuxer, video_decoder,
video_conv, video_sink, NULL);


  /* we link the elements together */
  /* file-source -> ogg-demuxer ~> vorbis-decoder -> converter ->
alsa-output */
  gst_element_link (source, demuxer);
  
  //gst_element_link_many (decoder, conv, sink, NULL); for .ogg audio
  gst_element_link_many (video_decoder, video_conv, video_sink, NULL); //
for .ogg video
  /*if(!gst_element_link_many (video_conv, video_sink, NULL))
  {
    g_print("One of the element is not linking");
  }*/
  g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added),
video_decoder);
  g_signal_connect (video_conv, "pad-added", G_CALLBACK(sink_pad_add),
video_sink);
  g_object_connect (video_conv, "pad-added", G_CALLBACK(sink_pad_add),
video_sink);
  /* note that the demuxer will be linked to the decoder dynamically.
     The reason is that Ogg may contain various streams (for example
     audio and video). The source pad(s) will be created at run time,
     by the demuxer when it detects the amount and nature of streams.
     Therefore we connect a callback function which will be executed
     when the "pad-added" is emitted.*/


  /* Set the pipeline to "playing" state*/
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  
  /* Iterate */
  g_print ("Running...\n");
  gchar *name;
  g_object_get(G_OBJECT (video_sink), "name", &name, NULL);
  factory = gst_element_factory_find ("autovideosink");
  g_print("The name of video_sink is  %s \n", name);
  g_print ("The '%s' element is a member of the category %s.\n"
           "Description: %s\n",
           gst_plugin_feature_get_name (GST_PLUGIN_FEATURE (factory)),
           gst_element_factory_get_metadata (factory,
GST_ELEMENT_METADATA_KLASS),
           gst_element_factory_get_metadata (factory,
GST_ELEMENT_METADATA_DESCRIPTION));

  GstPad *pad = gst_element_get_static_pad(video_sink, name);
  //name = gst_pad_get_name (pad);
  g_print ("A new pad %s was created\n", name);
  g_free (name);
  GstCaps *c = gst_pad_get_pad_template_caps(pad);
  g_print("Th structure is %c", c);
  GstSample *sample;
  g_signal_emit_by_name (video_sink, "pull-sample", sample);
  GstCaps *caps = gst_sample_get_caps(sample);
  GstBuffer *buffer = gst_sample_get_buffer (sample);
          //buffer = gst_app_sink_pull_buffer(pro->sink)
  GstMapInfo map;
  gst_buffer_map (buffer, &map, GST_MAP_READ);
  gint width, height;
  const GstStructure *str;    
  str = gst_caps_get_structure (caps, 0);
  gst_structure_get_int (str, "width", &width);
  gst_structure_get_int (str, "height", &height);
  g_print("The width is = %d", width);
  g_print("The height is = %d", height);
  cv::Mat frame(cv::Size(width, height), CV_8UC3, (char*)map.data, -3);
  cv::imwrite("Test2.jpg", frame);


  GstElement *rsink = gst_bin_get_by_name( GST_BIN( pipeline ), "sink" );
  GstSample *pstGstSample1;
 // pstGstSample1 = gst_app_sink_pull_sample( (GstAppSink *)rsink);
  //image_processing(pstGstSample1);
  g_main_loop_run (loop);


  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL
,"/home/siraj/Desktop/myplayer");
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);

return 0;
}



--
View this message in context: http://gstreamer-devel.966125.n4.nabble.com/How-to-extract-video-frames-from-pipeline-through-gstreamer-1-0-tp4672495.html
Sent from the GStreamer-devel mailing list archive at Nabble.com.


More information about the gstreamer-devel mailing list