点播请求:RTSP端口监听流程
服务器启动后会将监听端口对应的IO事件注册到EventThread,并在EventThread::Entry()中阻塞等待请求的到来,具体流程如下:
1. 服务器启动时会调用QTSServer::Initialize()---->QTSServer::CreateListeners()
// Create (and optionally start) one RTSPListenerSocket per RTSP addr:port
// pair that is tracked but does not have a listener yet. Excerpt -- elided
// code (......) builds theRTSPPortTrackers / newListenerArray.
Bool16 QTSServer::CreateListeners(Bool16 startListeningNow, QTSServerPrefs* inPrefs, UInt16 inPortOverride)
{
......
// Create any new listeners we need
for (UInt32 count3 = 0; count3 < theTotalRTSPPortTrackers; count3++)
{
if (theRTSPPortTrackers[count3].fNeedsCreating)
{
// Initialize() performs socket()/bind()/listen() on the tracker's
// address and port (see TCPListenerSocket::Initialize below).
newListenerArray[curPortIndex] = NEW RTSPListenerSocket();
QTSS_Error err = newListenerArray[curPortIndex]->Initialize(theRTSPPortTrackers[count3].fIPAddr, theRTSPPortTrackers[count3].fPort);
// Port number as text, used only for the log messages below.
char thePortStr[20];
qtss_sprintf(thePortStr, "%hu", theRTSPPortTrackers[count3].fPort);
//
// If there was an error creating this listener, destroy it and log an error
if ((startListeningNow) && (err != QTSS_NoErr))
delete newListenerArray[curPortIndex];
// NOTE(review): after the delete above the array slot still holds a
// dangling pointer; it is only overwritten by the next successful
// create because curPortIndex is not advanced here -- confirm that no
// later code walks slots past curPortIndex.
if (err == EADDRINUSE)
QTSSModuleUtils::LogError(qtssWarningVerbosity, qtssListenPortInUse, 0, thePortStr);
else if (err == EACCES)
QTSSModuleUtils::LogError(qtssWarningVerbosity, qtssListenPortAccessDenied, 0, thePortStr);
else if (err != QTSS_NoErr)
QTSSModuleUtils::LogError(qtssWarningVerbosity, qtssListenPortError, 0, thePortStr);
else
{
//
// This listener was successfully created.
// Arm a read event so the EventThread wakes us when a client connects.
if (startListeningNow)
newListenerArray[curPortIndex]->RequestEvent(EV_RE);
curPortIndex++;
}
}
}
......
}
2.QTSServer::CreateListeners()调用RTSPListenerSocket::Initialize()完成端口的绑定与监听。
// Open, bind and start listening on addr:port.
// Returns 0 on success, otherwise the OS error from the failing step.
OS_Error TCPListenerSocket::Initialize(UInt32 addr, UInt16 port)
{
    // Create the underlying TCP socket first; nothing else makes sense
    // without a valid descriptor.
    OS_Error err = this->TCPSocket::Open();
    if (err != 0)
        return err;

#ifndef __Win32__
    // Set SO_REUSEADDR before bind. Skipped on NT, where this option lets
    // multiple processes bind the same port simultaneously.
    this->ReuseAddr();
#endif

    err = this->Bind(addr, port);
    if (err != 0)
        return err; // no assert -- typically just "port already in use"

    // Advertise a big receive buffer because these TCP sockets can carry
    // incoming broadcast data. This may make the server run out of memory
    // faster if it gets bogged down, but it is unavoidable.
    this->SetSocketRcvBufSize(96 * 1024);

    err = this->listen(kListenQueueLength);
    AssertV(err == 0, OSThread::GetErrno());
    return err;
}
3.QTSServer::CreateListeners()调用RTSPListenerSocket::RequestEvent()注册触发响应的IO事件,并将RTSPListenerSocket实例对应的id加入到EventThread的处理列表fRefTable中
// Register (first call) or re-arm (subsequent calls) interest in I/O events
// described by theMask for this context's file descriptor, and on the first
// call also publish this context in the EventThread's ref table under a
// freshly allocated unique ID so EventThread::Entry() can resolve events
// back to this object.
void EventContext::RequestEvent(int theMask)
{
#if DEBUG
fModwatched = true;
#endif
//
// The first time this function gets called, we're supposed to
// call watchevent. Each subsequent time, call modwatch. That's
// the way the MacOS X event queue works.
if (fWatchEventCalled)
{
fEventReq.er_eventbits = theMask;
#if MACOSXEVENTQUEUE
if (modwatch(&fEventReq, theMask) != 0)
#else
#if defined(__linux__) && !defined(EASY_DEVICE)
if (addEpollEvent(&fEventReq, theMask) != 0)
#else
if (select_modwatch(&fEventReq, theMask) != 0)//register the I/O events to listen for
#endif
#endif
AssertV(false, OSThread::GetErrno());
}
else
{
//allocate a Unique ID for this socket, and add it to the ref table
//johnson find the bug
bool bFindValid = false;
// NOTE(review): both do-loops below execute exactly once (while (0)), so
// despite the "find a new one until it's free" comments an already-used ID
// is never retried, and bFindValid is set but never read -- confirm this
// is intentional before relying on ID uniqueness here.
#ifdef __Win32__
//
// Kind of a hack. On Win32, the way that we pass around the unique ID is
// by making it the message ID of our Win32 message (see win32ev.cpp).
// Messages must be >= WM_USER. Hence this code to restrict the numberspace
// of our UniqueIDs.
do
{
if (!compare_and_store(8192, WM_USER, &sUniqueID)) // Fix 2466667: message IDs above a
fUniqueID = (PointerSizedInt)atomic_add(&sUniqueID, 1); // level are ignored, so wrap at 8192
else
fUniqueID = (PointerSizedInt)WM_USER;
//If the fUniqueID is used, find a new one until it's free
OSRef * ref = fEventThread->fRefTable.Resolve(&fUniqueIDStr);
if (ref != NULL)
{
fEventThread->fRefTable.Release(ref);
}
else
{
bFindValid = true;// ok, it's free
}
} while (0);
#else
do
{
// Wrap the ID counter at 10000000 back to 1, otherwise take the next ID.
if (!compare_and_store(10000000, 1, &sUniqueID))
fUniqueID = (PointerSizedInt)atomic_add(&sUniqueID, 1);
else
fUniqueID = 1;
//If the fUniqueID is used, find a new one until it's free
OSRef * ref = fEventThread->fRefTable.Resolve(&fUniqueIDStr);
if (ref != NULL)
{
fEventThread->fRefTable.Release(ref);
}
else
{
bFindValid = true;// ok, it's free
}
} while (0);
#endif
// Publish this context under its unique ID so the EventThread can find it.
fRef.Set(fUniqueIDStr, this);
fEventThread->fRefTable.Register(&fRef);//add this context's ID to the EventThread's ref table
//fill out the eventreq data structure
::memset(&fEventReq, '\0', sizeof(fEventReq));
fEventReq.er_type = EV_FD;
fEventReq.er_handle = fFileDesc;
fEventReq.er_eventbits = theMask;
// er_data carries the unique ID back to us when the event fires.
fEventReq.er_data = (void*)fUniqueID;
fWatchEventCalled = true;
#if MACOSXEVENTQUEUE
if (watchevent(&fEventReq, theMask) != 0)
#else
#if defined(__linux__) && !defined(EASY_DEVICE)
if (addEpollEvent(&fEventReq, theMask) != 0)
#else
if (select_modwatch(&fEventReq, theMask) != 0)
#endif
#endif
//this should never fail, but if it does, cleanup.
AssertV(false, OSThread::GetErrno());
}
}
4.EventThread::Entry()中调用select_waitevent(&theCurrentEvent, NULL)阻塞等待注册IO事件发生。
点播请求:RTSP连接建立
客户端发起点播请求,触发服务器建立RTSP连接的事件
针对RTSP协议,Darwin Streaming Server在554端口上侦听,当有连接请求到达时,通过accept调用返回一个socket,对应的后续RTSP请求都是通过这个socket来传送的。我们把RTSP相关的事件分成两类,一类是RTSP连接请求,一类是RTSP请求。先来看RTSP连接请求的过程:
1. RTSP连接到达后,select_waitevent函数阻塞等待网络IO事件会返回,代码在EventContext.cpp的EventThread::Entry中。
// Event dispatch loop (excerpt): blocks in the platform-specific wait call
// until a registered fd has I/O ready; the elided code (......) then
// resolves the event to an EventContext and signals it.
void EventThread::Entry()
{
struct eventreq theCurrentEvent;
::memset(&theCurrentEvent, '\0', sizeof(theCurrentEvent));
while (true)
{
// Keep waiting while the wait call is interrupted by a signal (EINTR).
int theErrno = EINTR;
while (theErrno == EINTR)
{
#if MACOSXEVENTQUEUE
int theReturnValue = waitevent(&theCurrentEvent, NULL);
#else
#if defined(__linux__) && !defined(EASY_DEVICE)
int theReturnValue = epoll_waitevent(&theCurrentEvent, NULL);
#else
int theReturnValue = select_waitevent(&theCurrentEvent, NULL);
#endif
#endif
//Sort of a hack. In the POSIX version of the server, waitevent can return
//an actual POSIX errorcode.
if (theReturnValue >= 0)
theErrno = theReturnValue;
else
theErrno = OSThread::GetErrno();
}
......
}
2.select_waitevent返回当前的事件对象,通过事件对象中的er_data获取相应的id,使用该id在EventThread::fRefTable中查找对应的EventContext。得到的是EventContext类型的派生类RTSPListenerSocket。相应的代码在EventContext.cpp中的Entry中。
// Event dispatch loop (excerpt): after the wait call returns, the event's
// er_data cookie (the unique ID set in EventContext::RequestEvent) is
// resolved through fRefTable back to the owning EventContext.
void EventThread::Entry()
{
......
//ok, there's data waiting on this socket. Send a wakeup.
if (theCurrentEvent.er_data != NULL)
{
//The cookie in this event is an ObjectID. Resolve that objectID into
//a pointer.
StrPtrLen idStr((char*)&theCurrentEvent.er_data, sizeof(theCurrentEvent.er_data));
OSRef* ref = fRefTable.Resolve(&idStr);
if (ref != NULL)
{
EventContext* theContext = (EventContext*)ref->GetObject();
#if DEBUG
theContext->fModwatched = false;
#endif
// Virtual dispatch: for a listener socket this runs
// TCPListenerSocket::ProcessEvent (accept); otherwise the base
// EventContext::ProcessEvent (signal the task).
theContext->ProcessEvent(theCurrentEvent.er_eventbits);
fRefTable.Release(ref);
}
}
......
}
3. Entry函数中接着调用ProcessEvent处理事件。相应的代码在EventContext.cpp的EventThread::Entry中。
theContext->ProcessEvent(theCurrentEvent.er_eventbits);
注意,由于对应的EventContext类其实指向的是RTSPListenerSocket,因此调用的应该是TCPListenerSocket::ProcessEvent。
// Accept one incoming connection on the listening socket, wrap it in a new
// session task/socket pair obtained from GetSessionTask(), configure the
// accepted socket, and re-arm the listener for the next connection.
// Runs on the shared EventThread, so it must complete quickly.
void TCPListenerSocket::ProcessEvent(int /*eventBits*/)
{
//we are executing on the same thread as every other
//socket, so whatever you do here has to be fast.
struct sockaddr_in addr;
#if __Win32__ || __osf__ || __sgi__ || __hpux__
int size = sizeof(addr);
#else
socklen_t size = sizeof(addr);
#endif
Task* theTask = NULL;
TCPSocket* theSocket = NULL;
//fSocket data member of TCPSocket.
int osSocket = accept(fFileDesc, (struct sockaddr*)&addr, &size);
//test osSocket = -1;
if (osSocket == -1)
{
//take a look at what this error is.
int acceptError = OSThread::GetErrno();
if (acceptError == EAGAIN)
{
//If it's EAGAIN, there's nothing on the listen queue right now,
//so modwatch and return
this->RequestEvent(EV_RE);
return;
}
//test acceptError = ENFILE;
//test acceptError = EINTR;
//test acceptError = ENOENT;
//if these error gets returned, we're out of file desciptors,
//the server is going to be failing on sockets, logs, qtgroups and qtuser auth file accesses and movie files. The server is not functional.
if (acceptError == EMFILE || acceptError == ENFILE)
{
#ifndef __Win32__
QTSSModuleUtils::LogErrorStr(qtssFatalVerbosity, "Out of File Descriptors. Set max connections lower and check for competing usage from other processes. Exiting.");
#endif
exit(EXIT_FAILURE);
}
else
{
// Any other accept error: log it, then create and immediately kill a
// session task so the error is surfaced through the normal task path.
char errStr[256];
errStr[sizeof(errStr) - 1] = 0;
qtss_snprintf(errStr, sizeof(errStr) - 1, "accept error = %d '%s' on socket. Clean up and continue.", acceptError, strerror(acceptError));
WarnV((acceptError == 0), errStr);
theTask = this->GetSessionTask(&theSocket);
if (theTask == NULL)
{
// NOTE(review): osSocket is -1 in this branch (accept failed), so this
// close() call operates on an invalid descriptor -- harmless but
// confirm it is intentional.
close(osSocket);
}
else
{
theTask->Signal(Task::kKillEvent); // just clean up the task
}
if (theSocket)
theSocket->fState &= ~kConnected; // turn off connected state
return;
}
}
// accept succeeded: get a session task (RTSPListenerSocket returns a new
// RTSPSession) and its not-yet-attached TCPSocket.
theTask = this->GetSessionTask(&theSocket);
if (theTask == NULL)
{ //this should be a disconnect. do an ioctl call?
close(osSocket);
if (theSocket)
theSocket->fState &= ~kConnected; // turn off connected state
}
else
{
Assert(osSocket != EventContext::kInvalidFileDesc);
//set options on the socket
//we are a server, always disable nagle algorithm
int one = 1;
int err = ::setsockopt(osSocket, IPPROTO_TCP, TCP_NODELAY, (char*)&one, sizeof(int));
AssertV(err == 0, OSThread::GetErrno());
err = ::setsockopt(osSocket, SOL_SOCKET, SO_KEEPALIVE, (char*)&one, sizeof(int));
AssertV(err == 0, OSThread::GetErrno());
int sndBufSize = 96L * 1024L;
err = ::setsockopt(osSocket, SOL_SOCKET, SO_SNDBUF, (char*)&sndBufSize, sizeof(int));
AssertV(err == 0, OSThread::GetErrno());
//setup the socket. When there is data on the socket,
//theTask will get an kReadEvent event
theSocket->Set(osSocket, &addr);
theSocket->InitNonBlocking(osSocket);
theSocket->SetTask(theTask);
theSocket->RequestEvent(EV_RE);
theTask->SetThreadPicker(Task::GetBlockingTaskThreadPicker()); //The Message Task processing threads
}
if (fSleepBetweenAccepts)
{
// We are at our maximum supported sockets
// slow down so we have time to process the active ones (we will respond with errors or service).
// wake up and execute again after sleeping. The timer must be reset each time through
//qtss_printf("TCPListenerSocket slowing down\n");
this->SetIdleTimer(kTimeBetweenAcceptsInMsec); //sleep 1 second
}
else
{
// sleep until there is a read event outstanding (another client wants to connect)
//qtss_printf("TCPListenerSocket normal speed\n");
this->RequestEvent(EV_RE);
}
fOutOfDescriptors = false; // always false for now we don't properly handle this elsewhere in the code
}
4.TCPListenerSocket::ProcessEvent方法中,会调用accept得到连接的socket,通过执行theTask = this->GetSessionTask(&theSocket),实际调用的是RTSPListenerSocket::GetSessionTask。
// Create the session task for a freshly accepted RTSP connection.
// On return *outSocket points at the session's TCPSocket, which is not yet
// attached to an OS-level socket (the caller attaches the accepted fd).
Task* RTSPListenerSocket::GetSessionTask(TCPSocket** outSocket)
{
    Assert(outSocket != NULL);

    // When the server sits behind round-robin DNS, the client needs to know
    // this server's IP address so it can direct the "POST" half of an
    // RTSP-through-HTTP tunnel to the same machine.
    Bool16 reportConnectionAddr =
        QTSServerInterface::GetServer()->GetPrefs()->GetDoReportHTTPConnectionAddress();

    RTSPSession* session = NEW RTSPSession(reportConnectionAddr);
    *outSocket = session->GetSocket(); // out socket is not attached to a unix socket yet.

    // Throttle the accept loop when at the connection limit.
    if (!this->OverMaxConnections(0))
        this->RunNormal();
    else
        this->SlowDown();

    return session;
}
5.在RTSPListenerSocket::GetSessionTask方法中,
调用RTSPSession* theTask = NEW RTSPSession(doReportHTTPConnectionAddress)建立了一个新的RTSPSession。
6.回到TCPListenerSocket.cpp文件中的TCPListenerSocket::ProcessEvent方法,会将accept返回的osSocket作为fd设置给theSocket(TCPSocket)。然后调用this->RequestEvent(EV_RE)把刚刚建立好的RTSP连接的fd加入到侦听队列,等待网络IO事件的到来。
点播请求:RTSP请求处理
RTSP请求的处理流程步骤如下:
1. RTSP请求到达后,被select_waitevent函数捕获,代码在EventContext.cpp的EventThread::Entry中。
int theReturnValue = waitevent(&theCurrentEvent, NULL);
2. 查找EventThread::fRefTable,获取对应的EventContext。得到的是EventContext类。相应的代码在EventContext.cpp的Entry中,如下:
// Fragment of EventThread::Entry(): map the fired event back to its
// EventContext via the unique-ID cookie stored in er_data, then dispatch.
//ok, there's data waiting on this socket. Send a wakeup.
if (theCurrentEvent.er_data != NULL)
{
//The cookie in this event is an ObjectID. Resolve that objectID into
//a pointer.
StrPtrLen idStr((char*)&theCurrentEvent.er_data, sizeof(theCurrentEvent.er_data));
OSRef* ref = fRefTable.Resolve(&idStr);
if (ref != NULL)
{
EventContext* theContext = (EventContext*)ref->GetObject();
#if DEBUG
theContext->fModwatched = false;
#endif
// For an already-connected RTSP socket this resolves to the base
// EventContext::ProcessEvent, which signals the session task.
theContext->ProcessEvent(theCurrentEvent.er_eventbits);
fRefTable.Release(ref);
}
}
3. 调用ProcessEvent,处理事件。
注意,此时调用的是EventContext::ProcessEvent。
// Default event handler: wake the attached task with a read event so it
// gets scheduled on a TaskThread (e.g. RTSPSession::Run for RTSP sockets).
virtual void ProcessEvent(int /*eventBits*/)
{
    // Optional debug trace of which context/task is being woken.
    if (EVENTCONTEXT_DEBUG)
    {
        if (fTask != NULL)
            qtss_printf("EventContext::ProcessEvent context=%p task=%p TaskName=%s\n", (void *)this, (void *)fTask, fTask->fTaskName);
        else
            qtss_printf("EventContext::ProcessEvent context=%p task=NULL\n", (void *) this);
    }

    if (fTask != NULL)
        fTask->Signal(Task::kReadEvent);
}
4. EventContext::ProcessEvent方法在EventContext.h中实现,然后调用了fTask->Signal(Task::kReadEvent),把RTSPSession加入到TaskThread的队列中等待RTSPSession::Run()被调用执行,fTask是RTSPSession类的实例。
5. 后续就是RTSPSession::Run()对RTSP请求的具体的处理。