[gst-devel] Newbie help. Cannot output audio/video simultaneously (with DirectFB)

danielkun at iremo.com danielkun at iremo.com
Wed Feb 27 06:01:16 CET 2008


Hello again,

I sent an email earlier (the same one quoted below) asking why the
video/audio was choppy.
It turned out that I hadn't set the properties correctly. (I was a
little too excited.)
Sorry for the noise. The video/audio plays back just fine now.

Thanks

Daniel

---


Hello,

I have another question, this time regarding queues.

I have managed to play back an MPEG file with DirectFB, although the
audio and video are very choppy. I tried changing the properties on
the queue, but it doesn't seem to help.

What am I missing?
(I pasted my current code at the bottom)
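
For reference, the queue limits can also be raised straight from the
command line; setting a max-size property to 0 removes that limit
entirely, e.g. (an untested sketch):

... ! queue max-size-buffers=0 max-size-bytes=0 max-size-time=0 ! ...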

To Thijs:
Thank you again for your help.

> gst-launch filesrc location=movie.mpg ! mpegdemux name=demuxer
> demuxer. ! queue ! mpeg2dec ! ffmpegcolorspace ! dfbvideosink
> demuxer. ! queue ! mad ! audioconvert ! alsasink

I was unable to run the pipeline above as-is, although changing
"demuxer." to "demuxer.video_00" got me a step further.
I'm currently getting the following error:

gstbasesrc.c(2165): get_base_src_loop(): /pipeline0/filesrc0:
streaming task paused, reason not-negotiated (-4)
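
For completeness, the full pipeline with both pad names spelled out
(assuming the demuxer exposes video_00 and audio_00, as in the
audio-only command below) would be:

gst-launch filesrc location=movie.mpg ! mpegdemux name=demuxer \
  demuxer.video_00 ! queue ! mpeg2dec ! ffmpegcolorspace ! dfbvideosink \
  demuxer.audio_00 ! queue ! mad ! audioconvert ! alsasink

Running gst-launch with -v prints the caps negotiated on each link,
which should narrow down where the not-negotiated failure happens.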

> gst-launch filesrc location=movie.mpg ! mpegdemux name=demuxer
> demuxer.audio_00 ! queue ! mad ! audioconvert ! alsasink

The above (audio-only) pipeline works just fine.

Thank you,

Daniel


#include <string.h>
#include <stdio.h>
#include <directfb.h>
#include <gst/gst.h>

/* Standard DirectFB error-checking macro (as used in the DirectFB
 * examples), included here so the listing compiles on its own. */
#define DFBCHECK(x...)                                          \
  {                                                             \
    DFBResult err = x;                                          \
    if (err != DFB_OK)                                          \
      {                                                         \
        fprintf (stderr, "%s <%d>:\n\t", __FILE__, __LINE__);   \
        DirectFBErrorFatal (#x, err);                           \
      }                                                         \
  }

static IDirectFB *dfb = NULL;
static IDirectFBSurface *primary = NULL;
static GMainLoop *loop;

GstElement *pipeline, *source, *parser,
    *queue_audio, *decoder_audio, *convert_audio, *sink_audio,
    *queue_video, *decoder_video, *convert_video, *sink_video;

// timeout callback: quit the main loop so main() can clean up
static gboolean
get_me_out (gpointer data)
{
  g_main_loop_quit (loop);
  return FALSE;
}

// "pad-added" callback: link the demuxer's dynamic pads to the
// matching queue as soon as they appear
static void
new_pad (GstElement *element, GstPad *pad, gpointer data)
{
  gchar *name = gst_pad_get_name (pad);

  if (NULL != strstr (name, "video"))
    {
      GstPad *sinkpad;

      g_print ("Dynamic pad created, linking demuxer to video queue '%s'\n", name);
      sinkpad = gst_element_get_pad (queue_video, "sink");
      gst_pad_link (pad, sinkpad); // src pad -> sink pad
      gst_object_unref (sinkpad);
    }

  if (NULL != strstr (name, "audio"))
    {
      GstPad *sinkpad;

      g_print ("Dynamic pad created, linking demuxer to audio queue '%s'\n", name);
      sinkpad = gst_element_get_pad (queue_audio, "sink");
      gst_pad_link (pad, sinkpad); // src pad -> sink pad
      gst_object_unref (sinkpad);
    }

  g_free (name); // gst_pad_get_name() returns a copy that must be freed
}


int
main (int argc, char *argv[])
{
  DFBSurfaceDescription dsc;

  // Init both DirectFB and GStreamer
  DFBCHECK (DirectFBInit (&argc, &argv));
  gst_init (&argc, &argv);

  if (argc < 2)
    {
      g_printerr ("Usage: %s <MPEG file>\n", argv[0]);
      return -1;
    }

  loop = g_main_loop_new (NULL, FALSE);

  // Create the DirectFB main context and set it to fullscreen
  DFBCHECK (DirectFBCreate (&dfb));
  DFBCHECK (dfb->SetCooperativeLevel (dfb, DFSCL_FULLSCREEN));

  // We want a double-buffered primary surface
  dsc.flags = DSDESC_CAPS;
  dsc.caps = DSCAPS_PRIMARY | DSCAPS_FLIPPING;

  DFBCHECK (dfb->CreateSurface (dfb, &dsc, &primary));


  // create elements
  pipeline = gst_pipeline_new (NULL);
  source = gst_element_factory_make ("filesrc", NULL);   // or videotestsrc
  parser = gst_element_factory_make ("mpegdemux", NULL); // or dvddemux, mpegparse, mpegvideoparse

  queue_audio = gst_element_factory_make ("queue", NULL);
  queue_video = gst_element_factory_make ("queue", NULL);

  guint intval;
  guint64 int64val;

  // NOTE: max-size-time is a guint64 property, so the value has to be
  // passed as a 64-bit constant through the varargs
  g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 400, NULL);
  g_object_set (G_OBJECT (queue_audio), "max-size-time",
      G_GUINT64_CONSTANT (2000000000), NULL);
  g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 20000000, NULL);

  // read the values back to verify they were set
  g_object_get (G_OBJECT (queue_audio), "max-size-buffers", &intval, NULL);
  g_print ("max-size-buffers:'%u'\n", intval);
  g_object_get (G_OBJECT (queue_audio), "max-size-time", &int64val, NULL);
  g_print ("max-size-time:'%" G_GUINT64_FORMAT "'\n", int64val);
  g_object_get (G_OBJECT (queue_audio), "max-size-bytes", &intval, NULL);
  g_print ("max-size-bytes:'%u'\n", intval);


  decoder_audio = gst_element_factory_make ("mad", NULL);
  decoder_video = gst_element_factory_make ("mpeg2dec", NULL);

  convert_audio = gst_element_factory_make ("audioconvert", NULL);
  convert_video = gst_element_factory_make ("ffmpegcolorspace", NULL);

  sink_audio = gst_element_factory_make ("alsasink", NULL);
  sink_video = gst_element_factory_make ("dfbvideosink", NULL);

  // that's the interesting part: hand the primary surface to dfbvideosink
  g_object_set (sink_video, "surface", primary, NULL);

  // set the filename property on the file source
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);


  // add all elements to the pipeline
  gst_bin_add_many (GST_BIN (pipeline), source, parser,
      queue_video, decoder_video, convert_video, sink_video,
      queue_audio, decoder_audio, convert_audio, sink_audio, NULL);

  // link together - note that the demuxer cannot be linked to the
  // queues yet, because its source pads only appear once the stream
  // has been parsed
  gst_element_link (source, parser);
  gst_element_link_many (queue_video, decoder_video, convert_video,
      sink_video, NULL);
  gst_element_link_many (queue_audio, decoder_audio, convert_audio,
      sink_audio, NULL);

  // link the demuxer's dynamic pads as they show up
  g_signal_connect (parser, "pad-added", G_CALLBACK (new_pad), NULL);

  // Now set to playing and iterate.
  g_print ("Setting to PLAYING\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_print ("Running\n");

  // get us out after 10 seconds
  g_timeout_add (10000, get_me_out, NULL);
  g_main_loop_run (loop);

  // stop playback and clean up
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));

  // free the main loop
  g_main_loop_unref (loop);

  // release DirectFB surface and context
  primary->Release (primary);
  dfb->Release (dfb);

  return 0;
}
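
One more thing that would help when debugging errors like the
not-negotiated one above: the program never watches the pipeline's
GstBus, so GStreamer's error messages are silently dropped. Below is a
minimal sketch of a bus watch; the bus_call function and its
quit-on-error behaviour are my own addition, not part of the original
program.

// Hypothetical addition: report pipeline errors/EOS and stop the loop.
static gboolean
bus_call (GstBus *bus, GstMessage *msg, gpointer data)
{
  switch (GST_MESSAGE_TYPE (msg))
    {
      case GST_MESSAGE_ERROR:
        {
          GError *err = NULL;
          gchar *debug = NULL;

          gst_message_parse_error (msg, &err, &debug);
          g_printerr ("Error: %s\n", err->message);
          if (debug != NULL)
            g_printerr ("Debug: %s\n", debug);
          g_error_free (err);
          g_free (debug);
          g_main_loop_quit (loop);
          break;
        }
      case GST_MESSAGE_EOS:
        g_print ("End of stream\n");
        g_main_loop_quit (loop);
        break;
      default:
        break;
    }
  return TRUE; // keep the watch installed
}

// In main(), before g_main_loop_run (loop):
//   GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
//   gst_bus_add_watch (bus, bus_call, NULL);
//   gst_object_unref (bus);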