[Bug 704910] New: sync/ Error creating thread/Segmentation fault
GStreamer (bugzilla.gnome.org)
bugzilla at gnome.org
Thu Jul 25 20:02:12 PDT 2013
https://bugzilla.gnome.org/show_bug.cgi?id=704910
GStreamer | common | 1.1.2
Summary: sync/ Error creating thread/Segmentation fault
Classification: Platform
Product: GStreamer
Version: 1.1.2
OS/Version: Linux
Status: UNCONFIRMED
Severity: normal
Priority: Normal
Component: common
AssignedTo: gstreamer-bugs at lists.freedesktop.org
ReportedBy: 765621965 at qq.com
QAContact: gstreamer-bugs at lists.freedesktop.org
GNOME version: ---
hi all:
I recently developed a project using Python GStreamer. It is a recording
server. I use udpsrc and rtpbin to receive the audio stream and the video
stream, then push them to 'mp4mux', and finally push the stream to 'filesink'.
The pipeline looks roughly like this:
udpsrc-->rtpbin--->rtph264depay--->h264parse-->mp4mux(video_0)--
|-->***.mp4
udpsrc-->rtpbin--->...--->faac---------------->mp4mux(audio_0)--
And I use SIP messages to control the pipeline; e.g. when I use 'Linphone' to
call the server, the program sets the pipeline to the PLAYING state.
There are two serious problems:
1. Audio/video are not in sync. The '**.mp4' file plays in sync only in the
'vlc' player; in other players the audio is delayed by about 1s. But when I
replace 'mp4mux' with 'matroskamux' and produce a '**.mkv', it is in sync in
every player. I then tested 'avimux', '3gpmux'... they behave the same as
'mp4mux', so I am confused...
2. I wrote a test program to stress the server. When I simulated 40 'Linphone'
clients calling the server, an error occurred:
(python:15406): GStreamer-WARNING **: failed to create thread: Error creating
thread: Resource temporarily unavailable
Segmentation fault (core dumped)
Can you help me ?
My program is as follows:
#!/usr/bin/env python
# -=- encoding: utf-8 -=-
################ VIDEO RECEIVER
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
import time, socket, fcntl, struct
import os
#GObject.threads_init()
Gst.init(None)
record_dir = ''
class Recorder:
    """Records one SIP call to an MP4 file.

    Receives an RTP audio stream (PCMU) and, optionally, an RTP video
    stream (H.264) through an rtpbin, re-encodes the audio to AAC (faac),
    and muxes everything into <record_dir>/<callid>-<timestamp>.mp4.

    NOTE(review): indentation of this class was reconstructed from a
    flattened (email-wrapped) source; block structure was inferred so that
    the audio-only path still works when has_video is False.
    """

    def __init__(self, callid, has_audio, has_video):
        """Build the receiving half of the pipeline.

        callid    -- identifier of the SIP call; used in the output file name
        has_audio -- flag to judge if the call has an audio stream
        has_video -- flag to judge if the call has a video stream
        """
        self.callid = callid
        self.has_video = has_video  # flag to judge if it has a video stream
        self.has_audio = has_audio  # flag to judge if it has an audio stream
        self.call_session = None      # SIP call handle, set via set_event_callback()
        self.session_callback = None  # callback used to close the SIP session
        # create Pipeline and rtpbin
        self.pipeline = Gst.Pipeline()
        self.rtpbin = Gst.ElementFactory.make('rtpbin', 'rtpbin')
        self.audio_dealing()
        if self.has_video:
            self.video_dealing()

    def video_dealing(self):
        """Create, add and link the video receive chain:
        udpsrc -> rtpbin (session 0) -> rtph264depay -> h264parse."""
        # create elements
        self.udpsrc_rtpin_video = Gst.ElementFactory.make('udpsrc', 'udpsrc0')
        self.udpsrc_rtcpin_video = Gst.ElementFactory.make('udpsrc', 'udpsrc1')
        # NOTE(review): this udpsink is created but never added to the pipeline
        # or linked to rtpbin's send_rtcp_src pad, so no RTCP receiver reports
        # are ever sent back to the caller -- confirm whether that is intended.
        self.udpsink_rtcpout_video = Gst.ElementFactory.make('udpsink',
                                                             'udpsink0')
        self.rtph264depay = Gst.ElementFactory.make('rtph264depay', 'rtpdepay')
        self.h264parse = Gst.ElementFactory.make('h264parse', 'h264parse')
        # add the elements into the pipeline
        self.pipeline.add(self.udpsrc_rtpin_video)
        self.pipeline.add(self.udpsrc_rtcpin_video)
        self.pipeline.add(self.rtph264depay)
        self.pipeline.add(self.h264parse)
        # Set properties
        self.udpsrc_rtpin_video.set_property('caps',
            Gst.caps_from_string('application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)H264'))
        self.rtp_port_v = self.get_rtp_port_video()
        self.udpsrc_rtpin_video.set_property('port', self.rtp_port_v)
        self.udpsrc_rtcpin_video.set_property('port', self.rtp_port_v + 1)
        # Link elements: video goes into RTP session 0
        self.udpsrc_rtpin_video.link_pads('src', self.rtpbin,
                                          'recv_rtp_sink_0')
        self.udpsrc_rtcpin_video.link_pads('src', self.rtpbin,
                                           'recv_rtcp_sink_0')
        self.rtph264depay.link(self.h264parse)

    def audio_dealing(self):
        """Create, add and link the audio receive chain:
        udpsrc -> rtpbin (session 1) -> rtppcmudepay -> mulawdec
        -> audioresample -> faac."""
        # create elements
        self.udpsrc_rtpin_audio = Gst.ElementFactory.make('udpsrc', 'udpsrc2')
        self.udpsrc_rtcpin_audio = Gst.ElementFactory.make('udpsrc', 'udpsrc3')
        # NOTE(review): never added to the pipeline or linked (see video_dealing).
        self.udpsink_rtcpout_audio = Gst.ElementFactory.make('udpsink',
                                                             'udpsink1')
        self.pcmudepay = Gst.ElementFactory.make('rtppcmudepay', 'rtppcmudepay')
        self.mulawdec = Gst.ElementFactory.make('mulawdec', 'mulawdec')
        self.audioresample = Gst.ElementFactory.make('audioresample',
                                                     'audioresample')
        self.audioenc = Gst.ElementFactory.make('faac', 'faac')
        # add the elements into the pipeline (rtpbin is added here because
        # audio_dealing() always runs, video_dealing() only sometimes)
        self.pipeline.add(self.rtpbin)
        self.pipeline.add(self.udpsrc_rtpin_audio)
        self.pipeline.add(self.udpsrc_rtcpin_audio)
        self.pipeline.add(self.pcmudepay)
        self.pipeline.add(self.mulawdec)
        self.pipeline.add(self.audioresample)
        self.pipeline.add(self.audioenc)
        # Set properties; 400 ms jitter-buffer latency on the rtpbin
        self.rtpbin.set_property('latency', 400)
        self.udpsrc_rtpin_audio.set_property('caps',
            Gst.caps_from_string('application/x-rtp,media=(string)audio,clock-rate=(int)8000,encoding-name=(string)PCMU'))
        self.rtp_port_a = self.get_rtp_port_audio()
        self.udpsrc_rtpin_audio.set_property('port', self.rtp_port_a)
        self.udpsrc_rtcpin_audio.set_property('port', self.rtp_port_a + 1)
        # Link elements: audio goes into RTP session 1
        self.udpsrc_rtpin_audio.link_pads('src', self.rtpbin,
                                          'recv_rtp_sink_1')
        self.udpsrc_rtcpin_audio.link_pads('src', self.rtpbin,
                                           'recv_rtcp_sink_1')
        self.pcmudepay.link(self.mulawdec)
        self.mulawdec.link(self.audioresample)
        self.audioresample.link(self.audioenc)

    def start_record(self):
        """Attach the muxer/sink and start the whole pipeline."""
        self.mp4mux_to_filesink()
        self.start_stream()
        print("Started mp4 recording...")

    def mp4mux_to_filesink(self):
        """Create mp4mux + filesink (plus leaky queues) and link them in."""
        # Create elements
        if self.has_video:
            self.q_v = Gst.ElementFactory.make('queue', None)
        self.q_a = Gst.ElementFactory.make('queue', None)
        self.mp4mux = Gst.ElementFactory.make('mp4mux', 'mp4mux')
        self.filesink = Gst.ElementFactory.make('filesink', 'filesink')
        # Set properties
        global record_dir
        # NOTE(review): the timestamp contains spaces and colons, which some
        # tools/filesystems dislike -- confirm this format is intended.
        curr_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                  time.localtime(time.time()))
        filename = record_dir + str(self.callid) + '-' + curr_time + '.mp4'
        self.filesink.set_property('location', filename)
        # BUG FIX: the original passed the *string* 'false'; a non-empty
        # string is truthy, so 'sync' was effectively set to True.  Pass the
        # real boolean to disable clock sync on the file sink.
        self.filesink.set_property('sync', False)
        # leaky=1 (upstream): drop old buffers rather than block the muxer
        if self.has_video:
            self.q_v.set_property('leaky', 1)
        self.q_a.set_property('leaky', 1)
        # Add elements into pipeline
        if self.has_video:
            self.pipeline.add(self.q_v)
        self.pipeline.add(self.q_a)
        self.pipeline.add(self.mp4mux)
        self.pipeline.add(self.filesink)
        # link elements
        if self.has_video:
            self.h264parse.link(self.q_v)
            self.q_v.link_pads('src', self.mp4mux, 'video_0')
        self.audioenc.link(self.q_a)
        self.q_a.link_pads('src', self.mp4mux, 'audio_0')
        self.mp4mux.link(self.filesink)

    def start_stream(self):
        """Hook up bus callbacks and set the pipeline to PLAYING."""
        # Set callbacks before going to PLAYING so no pad-added is missed
        self.rtpbin.connect('pad-added', self.rtpbin_pad_added)
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::eos', self.on_eos)
        self.bus.connect('message::element', self.on_timeout)
        # Set state to PLAYING
        self.pipeline.set_state(Gst.State.PLAYING)
        # BUG FIX: set_locked_state() takes a boolean, not a GstState.  The
        # original passed Gst.State.PLAYING, which only "worked" because the
        # enum value happens to be truthy.
        self.udpsink_rtcpout_audio.set_locked_state(True)
        if self.has_video:
            self.udpsink_rtcpout_video.set_locked_state(True)

    def stop_stream(self):
        """Request a clean stop: send EOS so mp4mux can finalize the file."""
        print('stop')
        self.pipeline.send_event(Gst.Event.new_eos())

    def on_eos(self, bus, msg):
        """Bus EOS handler: the MP4 is finalized, shut the pipeline down."""
        print("on_eos")
        bus.remove_signal_watch()
        self.pipeline.set_state(Gst.State.NULL)
        print("now shut down the pipeline")

    def on_timeout(self, bus, msg):
        """Element-message handler: a udpsrc timeout means the caller is gone,
        so close the SIP session and end the recording via EOS."""
        t = msg.get_structure().get_name()
        if t == 'GstUDPSrcTimeout':
            self.close_sip_session()
            print("on_timeout")
            self.pipeline.send_event(Gst.Event.new_eos())

    def set_event_callback(self, call, cb):
        # add by tjh: register the callback used to close the SIP session
        self.call_session = call
        self.session_callback = cb

    def close_sip_session(self):
        # add by tjh: close the SIP session through the registered callback
        self.session_callback(self.call_session, 'close')

    def rtpbin_pad_added(self, obj, pad):
        """Link a newly exposed rtpbin recv_rtp_src pad to its depayloader.
        Session 0 is video, session 1 is audio (see recv_rtp_sink_0/1)."""
        if not self.has_video:
            a_pad = self.pcmudepay.get_static_pad("sink")
            pad.link(a_pad)
            print("audio stream is coming")
        else:
            pad_name = pad.get_name()
            if pad_name.startswith('recv_rtp_src_0'):
                v_pad = self.rtph264depay.get_static_pad("sink")
                pad.link(v_pad)
                print("Video stream is coming")
            elif pad_name.startswith('recv_rtp_src_1'):
                a_pad = self.pcmudepay.get_static_pad("sink")
                pad.link(a_pad)
                print("audio stream is coming")
                # arm the udpsrc timeout (5 s, in nanoseconds) so a dead
                # caller eventually triggers on_timeout()
                self.udpsrc_rtpin_audio.set_property('timeout', 5000000000)

    def get_rtp_port_video(self):
        """Ask the kernel for a free UDP port by briefly starting the video
        udpsrc with port=0, then reading the allocated port back.

        NOTE(review): the socket is closed again when the element returns to
        NULL, so another process could grab the port before the pipeline
        restarts -- a race under heavy load."""
        self.udpsrc_rtpin_video.set_property('port', 0)
        self.udpsrc_rtpin_video.set_state(Gst.State.PAUSED)
        port_video = self.udpsrc_rtpin_video.get_property('port')
        self.udpsrc_rtpin_video.set_state(Gst.State.NULL)
        return port_video

    def get_rtp_port_audio(self):
        """Same free-port trick as get_rtp_port_video(), for the audio udpsrc
        (and with the same port-reuse race)."""
        self.udpsrc_rtpin_audio.set_property('port', 0)
        self.udpsrc_rtpin_audio.set_state(Gst.State.PAUSED)
        port_audio = self.udpsrc_rtpin_audio.get_property('port')
        self.udpsrc_rtpin_audio.set_state(Gst.State.NULL)
        return port_audio

    def get_local_address(self):
        """Return (ip, audio_rtp, audio_rtcp, video_rtp, video_rtcp).
        Video ports are 0 for an audio-only call."""
        rtp_port_a = self.rtp_port_a
        if self.has_video:
            rtp_port_v = self.rtp_port_v
            address = (get_ip_address('eth0'), rtp_port_a, rtp_port_a + 1,
                       rtp_port_v, rtp_port_v + 1)
        else:
            address = (get_ip_address('eth0'), rtp_port_a, rtp_port_a + 1,
                       0, 0)
        return address
def get_ip_address(ifname):
    """Return the IPv4 address assigned to interface *ifname* (e.g. 'eth0').

    Uses the SIOCGIFADDR ioctl on a throwaway UDP socket; Linux-only.
    Raises OSError if the interface does not exist or has no address.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        iface = ifname[:15]  # IFNAMSIZ-1: kernel interface names are capped
        if not isinstance(iface, bytes):
            # struct.pack('256s', ...) requires bytes on Python 3
            iface = iface.encode('utf-8')
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', iface)
        )[20:24])
    finally:
        # BUG FIX: the original never closed the socket, leaking one file
        # descriptor per call -- a plausible contributor to the reported
        # "Resource temporarily unavailable" failure under 40 parallel calls.
        s.close()
--
Configure bugmail: https://bugzilla.gnome.org/userprefs.cgi?tab=email
------- You are receiving this mail because: -------
You are the QA contact for the bug.
You are the assignee for the bug.
More information about the gstreamer-bugs
mailing list