GStreamer iOS and webrtcdsp for echo cancellation

sotiris returnttsource at gmail.com
Fri Nov 16 12:08:51 UTC 2018


Hello, we are developing an iOS VoIP application that uses GStreamer, and I would like to use webrtcdsp for echo cancellation. The application uses UDP to send the audio to the other devices over the network.

Is there a source code example of how to enable this? I have put my own untested guess at the wiring at the end of this mail.

The pipeline currently looks something like this:

    GstBus *bus_receiving, *bus_sending;
    GSource *bus_source_receiving, *bus_source_sending;
    
    /*****  Receiving Data  *****/
    
    GstCaps *caps;
    GstElement *rcvInput, *rcvqueue[2], *rcvBuffer, *rcvExtractAudio, *rcvDecoder,
        *rcvConverter, *rcvSampler, *rcvSpeakerOutput;
    
    pipeline_receiving=gst_pipeline_new("rcv-data");
    rcvInput = gst_element_factory_make("udpsrc","rcv-input");
    rcvqueue[0] = gst_element_factory_make ("queue", NULL);
    rcvBuffer =gst_element_factory_make("rtpjitterbuffer","rcv-buffer");
    rcvExtractAudio = gst_element_factory_make("rtppcmadepay","rcv-extract-pcma-audio");
    rcvDecoder =gst_element_factory_make("alawdec","rcv-decoder");
    rcvConverter =gst_element_factory_make("audioconvert","rcv-converter");
    rcvSampler = gst_element_factory_make("audioresample","rcv-sampler"); // remove it (not added to the pipeline below)
    rcvqueue[1] = gst_element_factory_make ("queue", NULL);
    rcvSpeakerOutput =gst_element_factory_make("osxaudiosink","rcv-output");
    
    
    if (!pipeline_receiving || !rcvInput || !rcvBuffer || !rcvExtractAudio ||
        !rcvDecoder || !rcvConverter || !rcvSampler || !rcvSpeakerOutput) {
        gchar *message = g_strdup_printf("Unable to create one or more GST elements: %s",
                                         "pipeline_receiving, rcvInput, rcvBuffer, rcvExtractAudio, "
                                         "rcvDecoder, rcvConverter, rcvSampler, rcvSpeakerOutput");
        [_log error:[NSString stringWithUTF8String:message]
               from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
        g_free (message);
        return;
    }
    
    // caps for the incoming RTP stream (PCMA, 8 kHz, mono)
    caps = gst_caps_new_simple ("application/x-rtp",
                                "media", G_TYPE_STRING, "audio",
                                "clock-rate", G_TYPE_INT, 8000,
                                "channels",     G_TYPE_INT,     1,
                                "encoding-name", G_TYPE_STRING, "PCMA",
                                "payload", G_TYPE_INT, 8,
                                NULL);
   
    
   
    g_object_set(G_OBJECT(rcvInput), "caps", caps, "port", localPort.intValue, NULL);
    gst_caps_unref(caps); // udpsrc keeps its own reference to the caps
    g_object_set(G_OBJECT(rcvBuffer), "latency", 300, NULL); // the jitter buffer should be around 30-50 ms (8 kHz sampling corresponds to 240-400 samples)
    
    // some elements have been removed due to high CPU demand.
    gst_bin_add_many(GST_BIN(pipeline_receiving), rcvInput, rcvBuffer, rcvExtractAudio,
                     rcvDecoder, rcvConverter, rcvSpeakerOutput, NULL);
    if (!gst_element_link_many(rcvInput, rcvBuffer, rcvExtractAudio, rcvDecoder,
                               rcvConverter, rcvSpeakerOutput, NULL))
    {
        [_log error:@"Elements could not be linked (receiving pipeline)"
               from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
        //gst_object_unref (pipeline_receiving);
        return;
    }
    
    /*****  Sending Data  *****/
    
    // after every src element (e.g. osxaudiosrc, rtpdtmfsrc) there should be a queue
    
    GstElement *dtmf_input, *queue[2], *audioresample, *alawenc, *rtppcmapay, *rtpdtmfmux, *udpsink;
    gint64 ptime = 20 * 1000000; // 20 ms packet time, in nanoseconds
    NSString *pt = settings[@"payloadtype"];
    [_log status:[NSString stringWithFormat:@"payloadtype is: %@", pt]
            from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
    pipeline_sending=gst_pipeline_new("sending-data");
    micinput = gst_element_factory_make("osxaudiosrc","snd-microphone");
    //micinput = gst_element_factory_make ("autoaudiosrc", "audiosrc");
    audioconvert = gst_element_factory_make("audioconvert",NULL);
    audioresample = gst_element_factory_make("audioresample",NULL);
    alawenc = gst_element_factory_make("alawenc","snd-encoder");
    rtppcmapay = gst_element_factory_make("rtppcmapay",NULL);
   
    g_object_set(G_OBJECT(rtppcmapay), "min-ptime", ptime, "max-ptime", ptime, NULL); // force 20 ms packets
    queue[0] = gst_element_factory_make ("queue", NULL);
    dtmf_input=gst_element_factory_make("rtpdtmfsrc",NULL);
    queue[1] = gst_element_factory_make ("queue", NULL);
    if (pt.intValue == 255) {
        [_log error:@"Payload type for DTMF is not set"
               from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
    }
    else {
        g_object_set(G_OBJECT(dtmf_input), "pt", pt.intValue, NULL);
    }
                 
    rtpdtmfmux=gst_element_factory_make("rtpdtmfmux",NULL);
    udpsink = gst_element_factory_make("udpsink",NULL);
   
    g_object_set(G_OBJECT(udpsink), "port", remotePort.intValue,
                 "host", remoteAddress.UTF8String, "bind-port", [localPort intValue], NULL);
    
    
    if (!pipeline_sending || !micinput || !audioconvert || !audioresample ||
        !alawenc || !rtppcmapay || !udpsink || !dtmf_input || !rtpdtmfmux) {
        gchar *message = g_strdup_printf("Unable to create one or more GST elements: %s",
                                         "pipeline_sending, source, converter, sampler, encoder, "
                                         "payload_encoder, sink, dtmf_input, rtpdtmfmux");
        [_log error:[NSString stringWithUTF8String:message]
               from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
        g_free (message);
        return;
    }
    
    
   
    gst_bin_add_many(GST_BIN(pipeline_sending), micinput, queue[0], audioconvert, audioresample,
                     alawenc, rtppcmapay, queue[1], udpsink, rtpdtmfmux, dtmf_input, NULL);
    if (!gst_element_link_many(micinput, audioconvert, audioresample, alawenc, rtppcmapay, queue[0], NULL))
    {
        [_log error:@"Elements could not be linked (sending pipeline, part 1)"
               from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
        return;
    }
    if (!gst_element_link_many(rtpdtmfmux, udpsink, NULL))
    {
        [_log error:@"Elements could not be linked (sending pipeline, part 2)"
               from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
        return;
    }
    if (!gst_element_link_many(dtmf_input, queue[1], NULL))
    {
        [_log error:@"Elements could not be linked (sending pipeline, part 3)"
               from:NSStringFromSelector(_cmd) className:NSStringFromClass([self class])];
        return;
    }
    
    // audio goes into the normal pad of rtpdtmfmux, DTMF into the priority pad
    gst_element_link_pads(queue[0], "src", rtpdtmfmux, "sink_0");
    gst_element_link_pads(queue[1], "src", rtpdtmfmux, "priority_sink_0");
    
    
    /* Create our own GLib Main Context and make it the default one */
    context = g_main_context_new();
    g_main_context_push_thread_default(context);
    
    // receiving
    /* Instruct the bus to emit signals for each received message, and connect to the interesting signals */
    bus_receiving = gst_element_get_bus (pipeline_receiving);
    bus_source_receiving = gst_bus_create_watch (bus_receiving);
    g_source_set_callback (bus_source_receiving, (GSourceFunc) gst_bus_async_signal_func, NULL, NULL);
    g_source_attach (bus_source_receiving, context);
    g_source_unref (bus_source_receiving);
    g_signal_connect (G_OBJECT (bus_receiving), "message::error",
                      G_CALLBACK (error_cb_receiving), (__bridge void *) self);
    g_signal_connect (G_OBJECT (bus_receiving), "message::state-changed",
                      G_CALLBACK (state_changed_cb_receiving), (__bridge void *) self);
    gst_object_unref (bus_receiving);
    
    
    // sending
    /* Instruct the bus to emit signals for each received message, and connect to the interesting signals */
    bus_sending = gst_element_get_bus (pipeline_sending);
    bus_source_sending = gst_bus_create_watch (bus_sending);
    g_source_set_callback (bus_source_sending, (GSourceFunc) gst_bus_async_signal_func, NULL, NULL);
    g_source_attach (bus_source_sending, context);
    g_source_unref (bus_source_sending);
    g_signal_connect (G_OBJECT (bus_sending), "message::error",
                      G_CALLBACK (error_cb_sending), (__bridge void *) self);
    g_signal_connect (G_OBJECT (bus_sending), "message::state-changed",
                      G_CALLBACK (state_changed_cb_sending), (__bridge void *) self);
    gst_object_unref (bus_sending);
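
For reference, from reading the webrtcdsp plugin documentation my understanding is that webrtcechoprobe has to sit in the playback path just before the audio sink, and webrtcdsp in the capture path before the encoder, with the dsp pointed at the probe by element name. This is the untested sketch I was planning to try on top of the pipelines above (the element names "echoprobe0"/"echodsp0", the placement and the property values are just my guesses, so please correct me if I got it wrong):

    GstElement *echoprobe, *echodsp;

    /* Receiving pipeline: the probe records what is played out through the
     * speakers, so it would go right before osxaudiosink. */
    echoprobe = gst_element_factory_make("webrtcechoprobe", "echoprobe0");
    gst_bin_add(GST_BIN(pipeline_receiving), echoprobe);
    // the original gst_element_link_many() for the receiving pipeline would become:
    if (!gst_element_link_many(rcvInput, rcvBuffer, rcvExtractAudio, rcvDecoder,
                               rcvConverter, echoprobe, rcvSpeakerOutput, NULL))
        return;

    /* Sending pipeline: the dsp removes the echo from the captured audio.
     * It seems to want raw audio at 8/16/32/48 kHz, so I would place it
     * after audioconvert/audioresample and before the encoder. */
    echodsp = gst_element_factory_make("webrtcdsp", "echodsp0");
    g_object_set(G_OBJECT(echodsp),
                 "echo-cancel", TRUE,
                 "probe", "echoprobe0", /* name of the webrtcechoprobe above, if I read the docs right */
                 NULL);
    gst_bin_add(GST_BIN(pipeline_sending), echodsp);
    // and the original link for the sending pipeline would become:
    if (!gst_element_link_many(micinput, audioconvert, audioresample, echodsp,
                               alawenc, rtppcmapay, queue[0], NULL))
        return;

I am also not sure whether the two pipelines need to share a clock, or whether extra latency has to be configured, for the probe and the dsp to stay in sync, so any pointers on that would be very welcome.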




