question: how to properly set up pipeline rtspsrc->decodebin->playsink
maksim
komlev.maxim at gmail.com
Wed Jun 10 04:49:29 UTC 2020
Hi, I have set up a pipeline to play an RTSP stream, using rtspsrc, two
decodebin elements (one for audio, one for video) and playsink for output.
The pipeline can play either the audio or the video stream, but I cannot
make both work at the same time. Can somebody give me a clue about what is
wrong in my pipeline?
Here is a code snippet:
/* Create our own GLib Main Context and make it the default one */
context = g_main_context_new ();
g_main_context_push_thread_default (context);

/* Create a GLib Main Loop */
GST_DEBUG ("Create main loop...");
main_loop = g_main_loop_new (context, FALSE);

/* Build pipeline */
pipeline = gst_pipeline_new ("pipeline");

/* Create source element */
source = gst_element_factory_make ("rtspsrc", "source");
if (!source) {
    GST_ERROR ("Could not create rtspsrc source");
    return;
}

/* Configure the RTSP source */
g_object_set (G_OBJECT (source), "do-rtcp", TRUE, NULL);
g_object_set (G_OBJECT (source), "latency", 0, NULL);
g_object_set (G_OBJECT (source), "tls-validation-flags",
              G_TLS_CERTIFICATE_VALIDATE_ALL, NULL);

/* Point the TLS backend at the CA bundle shipped inside the app bundle */
NSString *resources = [[NSBundle mainBundle] resourcePath];
const gchar *resources_dir = [resources UTF8String];
gchar *ca_certificates = g_build_filename (resources_dir, "ssl",
                                           "certs", "ca-certificates.crt", NULL);
g_setenv ("CA_CERTIFICATES", ca_certificates, TRUE);

GTlsDatabase *db = NULL;
/* BUG FIX: the original dereferenced `error->message` below without ever
 * declaring or populating an `error` variable. Capture the GError from
 * g_tls_file_database_new() instead. */
GError *tls_error = NULL;
if (ca_certificates) {
    GTlsBackend *backend = g_tls_backend_get_default ();
    if (backend) {
        db = g_tls_file_database_new (ca_certificates, &tls_error);
        if (db)
            g_tls_backend_set_default_database (backend, db);
    }
}
g_free (ca_certificates);   /* BUG FIX: g_build_filename() result was leaked */
if (!db) {
    GST_ERROR ("failed to parse CA CERT: %s\n",
               tls_error ? tls_error->message : "no TLS backend available");
    g_clear_error (&tls_error);
    return;
}
g_object_set (G_OBJECT (source), "tls-database", db, NULL);
g_object_unref (db);        /* BUG FIX: the property holds its own ref; drop ours */

/* One decodebin per RTP branch */
decodeAudioSink = gst_element_factory_make ("decodebin", "adecoder");
decodeVideoSink = gst_element_factory_make ("decodebin", "vdecoder");

/* Combined audio/video output */
playSink = gst_element_factory_make ("playsink", "sink");
gst_util_set_object_arg (G_OBJECT (playSink), "flags",
                         "soft-colorbalance+soft-volume+vis+text+audio+video");

/* Instruct the bus to emit signals for each received message, and
   connect to the interesting signals */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
gst_bus_add_watch (bus, my_bus_callback, main_loop);
gst_object_unref (bus);

/* Add all elements to the pipeline.
 * BUG FIX: the original also called gst_element_link() between rtspsrc,
 * the decodebins and playsink. Those calls always fail: rtspsrc has no
 * static src pads, decodebin's src pads are "sometimes" pads that only
 * appear after type-finding, and playsink's inputs are request pads.
 * All linking therefore has to happen in the "pad-added" callbacks below;
 * the eager link calls have been removed. */
gst_bin_add_many (GST_BIN (pipeline), source, decodeAudioSink,
                  decodeVideoSink, playSink, NULL);
g_signal_connect (source, "pad-added", G_CALLBACK (cb_pad_added),
                  (__bridge void *) self);
g_signal_connect (decodeAudioSink, "pad-added", G_CALLBACK (cb_pad_added),
                  (__bridge void *) self);
g_signal_connect (decodeVideoSink, "pad-added", G_CALLBACK (cb_pad_added),
                  (__bridge void *) self);

/* Set the pipeline to READY, so it can already accept a window handle */
gst_element_set_state (pipeline, GST_STATE_READY);
videoSink = gst_bin_get_by_interface (GST_BIN (pipeline),
                                      GST_TYPE_VIDEO_OVERLAY);
if (!videoSink) {
    GST_ERROR ("Could not retrieve video sink");
    return;
}
gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (videoSink),
                                     (guintptr) (id) videoView);
and here is the cb_pad_added callback:
/* "pad-added" handler shared by rtspsrc and both decodebins.
 *
 * For decodebin pads the caps name is audio/..., video/... or text/...,
 * so we request the matching playsink input pad. For rtspsrc pads the
 * caps are application/x-rtp; there we route by the caps' "media" field
 * to the corresponding decodebin's static sink pad.
 *
 * @param dec  the element that produced the new pad (unused directly)
 * @param pad  the newly added src pad to be linked downstream
 * @param self backend object holding playSink / decodeAudioSink /
 *             decodeVideoSink
 */
static void cb_pad_added(GstElement *dec, GstPad *pad, GStreamerBackend *self) {
    GstPad *sinkpad = NULL;
    GstPadLinkReturn ret;
    GstCaps *caps = NULL;
    GstStructure *str = NULL;
    const gchar *name = NULL;

    /* check media type */
    caps = gst_pad_query_caps(pad, NULL);
    str = gst_caps_get_structure(caps, 0);
    name = gst_structure_get_name(str);
    g_print("*** Linking to %s ***\n", name);

    /* We can now link this pad with the rtsp-decoder sink pad */
    if (g_str_has_prefix(name, "audio")) {
        GstElementClass *klass = GST_ELEMENT_GET_CLASS(self->playSink);
        GstPadTemplate *templ = gst_element_class_get_pad_template(klass,
                                                                   "audio_sink");
        sinkpad = gst_element_request_pad(self->playSink, templ, NULL, NULL);
    } else if (g_str_has_prefix(name, "video")) {
        GstElementClass *klass = GST_ELEMENT_GET_CLASS(self->playSink);
        GstPadTemplate *templ = gst_element_class_get_pad_template(klass,
                                                                   "video_sink");
        sinkpad = gst_element_request_pad(self->playSink, templ, NULL, NULL);
    } else if (g_str_has_prefix(name, "text")) {
        GstElementClass *klass = GST_ELEMENT_GET_CLASS(self->playSink);
        GstPadTemplate *templ = gst_element_class_get_pad_template(klass,
                                                                   "text_sink");
        sinkpad = gst_element_request_pad(self->playSink, templ, NULL, NULL);
    } else {
        /* application/x-rtp pad from rtspsrc: dispatch on the "media" field.
         * BUG FIX: gst_structure_get_string() may return NULL when the field
         * is absent; the original passed that NULL straight to g_strrstr(). */
        const gchar *media = gst_structure_get_string(str, "media");
        if (media && g_strrstr(media, "audio")) {
            sinkpad = gst_element_get_static_pad(self->decodeAudioSink, "sink");
        } else if (media && g_strrstr(media, "video")) {
            sinkpad = gst_element_get_static_pad(self->decodeVideoSink, "sink");
        }
    }
    gst_caps_unref(caps);

    /* BUG FIX: sinkpad stays NULL for unrecognized caps or when the pad
     * template/request fails; the original crashed in gst_pad_is_linked(). */
    if (!sinkpad) {
        g_print("no suitable sink pad found, ignoring pad\n");
        return;
    }

    /* If our converter is already linked, we have nothing to do here */
    if (gst_pad_is_linked(sinkpad)) {
        g_print("*** We are already linked ***\n");
        gst_object_unref(sinkpad);
        return;
    }
    g_print("proceeding to linking ...\n");

    ret = gst_pad_link(pad, sinkpad);
    if (GST_PAD_LINK_FAILED(ret)) {
        g_print("failed to link dynamically\n");
    } else {
        g_print("dynamically link successful\n");
    }
    gst_object_unref(sinkpad);
}
--
Sent from: http://gstreamer-devel.966125.n4.nabble.com/
More information about the gstreamer-devel
mailing list