A Stable, Efficient, Reliable, Multi-Platform RTSP-Server Component: How EasyRTSPServer Solves the Multi-Threading Efficiency Problem
Background
RTSP (Real Time Streaming Protocol) is an application-layer protocol, proposed jointly by RealNetworks and Netscape, for delivering streaming media efficiently over IP networks. RTSP provides playback controls such as pause and fast-forward, but it does not carry the media data itself; in effect, RTSP is the remote control of a streaming server. The server is free to deliver the stream content over either TCP or UDP. RTSP's syntax and operation resemble HTTP/1.1, but it places no particular emphasis on time synchronization and therefore tolerates network latency fairly well. It also allows multiple concurrent streams to be controlled at once (multicast), which both reduces the server's network load and supports multi-party video conferencing.
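Because RTSP is text-based like HTTP/1.1, a single request/response pair is easy to read. The following abbreviated exchange is purely illustrative (the URL and header values are made up):

C->S:  DESCRIBE rtsp://example.com/live/ch1 RTSP/1.0
       CSeq: 2
       Accept: application/sdp

S->C:  RTSP/1.0 200 OK
       CSeq: 2
       Content-Type: application/sdp
       Content-Length: 460

       v=0
       ...(SDP description of the stream's media tracks)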
About EasyRTSPServer
EasyRTSPServer is a stable, efficient, reliable RTSP-Server component with multi-platform support. Its API is simple and mature: callers do not need to deal with client connection handling, audio/video multiplexing, the details of the RTSP workflow, or RTP packetization and transmission. It supports a variety of audio and video formats, so you no longer have to implement the full RTSP OPTIONS/DESCRIBE/SETUP/PLAY/RTP/RTCP sequence yourself or worry about releasing memory correctly. This makes it a good fit for security surveillance, education, Internet live streaming, and similar fields.
How EasyRTSPServer Solves the Multi-Threading Efficiency Problem
The Problem
EasyRTSPServer is adapted from live555. An earlier article covered how the single-threaded design was turned into a multi-threaded one; this article adds some notes on the efficiency of that multi-threaded design.
The Solution
First, declare the MultiThread_CORE_T structure in GenericMediaServer.h, as follows:
#define MAX_BATCH_CLIENT_NUM 5

typedef struct __LIVE_THREAD_TASK_T
{
    int id;
    TaskScheduler *pSubScheduler;
    UsageEnvironment *pSubEnv;
    char liveURLSuffix[512];
    int releaseChannel;          // channel-release flag
    int handleDescribe;
    OSTHREAD_OBJ_T *osThread;    // thread object handle
    int clientNum;
    void *pClientConnectionPtr[MAX_BATCH_CLIENT_NUM];
    void *procPtr;
    void *extPtr;
} LIVE_THREAD_TASK_T;

#define MAX_DEFAULT_MULTI_THREAD_NUM 256    // maximum number of supported channels

typedef struct __MultiThread_CORE_T
{
    int threadNum;
    LIVE_THREAD_TASK_T *threadTask;
} MultiThread_CORE_T;
In the GenericMediaServer constructor, only the MultiThread_CORE_T table with its 256 LIVE_THREAD_TASK_T entries (MAX_DEFAULT_MULTI_THREAD_NUM) is created; the actual worker threads are not created at this point.
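The constructor thus reduces to a plain allocation. The following is a minimal sketch of what that might look like, based only on the structures above; the actual EasyRTSPServer constructor is not shown in this article:

// Sketch (assumption): allocate and zero the channel table. Judging from
// GetEnvBySuffix below, which dereferences pSubEnv before the worker thread
// runs, the per-channel scheduler/environment pairs are most likely
// pre-created here as well; only the OS threads (osThread) are deferred.
multiThreadCore.threadNum = MAX_DEFAULT_MULTI_THREAD_NUM;
multiThreadCore.threadTask = new LIVE_THREAD_TASK_T[MAX_DEFAULT_MULTI_THREAD_NUM];
memset(multiThreadCore.threadTask, 0, sizeof(LIVE_THREAD_TASK_T) * MAX_DEFAULT_MULTI_THREAD_NUM);
for (int i = 0; i < multiThreadCore.threadNum; i++)
{
    multiThreadCore.threadTask[i].id = i;
}

Deferring thread creation this way means an idle server costs nothing beyond this small table, regardless of how many of the 256 channels end up being used.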
When handling a client's DESCRIBE request, the server first checks whether the requested resource already exists in the channel list. Only if it does not is a corresponding worker thread created, as follows:
// If we are in the main thread, enter the channel-lookup flow
if (pEnv->GetEnvirId() == MAIN_THREAD_ID)
{
    UsageEnvironment *pChEnv = fOurServer.GetEnvBySuffix(pEnv, urlTotalSuffix, this, pThreadTask, True);
    if (NULL == pChEnv)
    {
        handleCmdRet = -1;
        this->pClientConnectionEnv = NULL;
        handleCmd_notFound();
        break;
    }
    else
    {
        _TRACE(TRACE_LOG_DEBUG, (char*)"[%s]Set socket[%d] Assign to [%d:%s]\n", pEnv->GetEnvirName(), this->fOurSocket, pChEnv->GetEnvirId(), pChEnv->GetEnvirName());
        // Move the socket out of the main thread; the worker thread will
        // re-register it on its own scheduler
        pEnv->taskScheduler().disableBackgroundHandling(fOurSocket);
        return MAIN_THREAD_ID;
    }
}
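Note the disableBackgroundHandling() call above: it detaches the client socket from the main thread's scheduler, but the socket itself stays open. To complete the hand-off, the worker thread must re-register that same socket on its own scheduler. The sketch below shows this re-registration using live555's standard setBackgroundHandling() API; the helper function and its parameters are assumptions for illustration, not EasyRTSPServer's actual code:

// Re-attach a migrated client socket to the worker's scheduler. From this
// point on, every RTSP command from this client (SETUP/PLAY/TEARDOWN/...)
// is read and handled inside the worker thread.
static void Worker_AttachClient(LIVE_THREAD_TASK_T *task, void *clientConnection, int clientSocket,
                                TaskScheduler::BackgroundHandlerProc *handlerProc)
{
    task->pSubEnv->taskScheduler().setBackgroundHandling(
        clientSocket,
        SOCKET_READABLE | SOCKET_EXCEPTION,    // wake on incoming data or socket errors
        handlerProc,                           // e.g. the per-connection RTSP request handler
        clientConnection);
}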
Back in the main thread, the key call is GenericMediaServer's GetEnvBySuffix function, which implements the assignment of clients to channels and their worker threads, as follows:
UsageEnvironment *GenericMediaServer::GetEnvBySuffix(UsageEnvironment *pMainThreadEnv, const char *urlSuffix, void *pClientConnection,
                                                     LIVE_THREAD_TASK_T **pThreadTask, Boolean bLockServerMediaSession)
{
    GenericMediaServer::ClientConnection *pClient = (GenericMediaServer::ClientConnection *)pClientConnection;
    int iFreeIdx = -1;
    UsageEnvironment *pEnv = NULL;
    if ((int)strlen(urlSuffix) < 1)
    {
        return NULL;
    }
    char streamName[512] = {0};
    int iProcRet = 0;
    Boolean bRequestTooMany = False;
    if (bLockServerMediaSession) LockServerMediaSession(pMainThreadEnv->GetEnvirName(), (char*)"GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
    do
    {
        for (int i = 0; i < multiThreadCore.threadNum; i++)
        {
            // Remember the first free slot in case a new channel must be created
            if ((iFreeIdx < 0) && ((int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) < 1) && (multiThreadCore.threadTask[i].releaseChannel == 0x00))
            {
                iFreeIdx = i;
            }
            if (0 == strcmp(urlSuffix, multiThreadCore.threadTask[i].liveURLSuffix))
            {
                if (multiThreadCore.threadTask[i].releaseChannel > 0x00)
                {
                    iProcRet = -1;
                    _TRACE(TRACE_LOG_DEBUG, (char *)"[%s] The channel is being deleted. Please retry later: %s\n", multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(), urlSuffix);
                    break;
                }
                if (NULL == multiThreadCore.threadTask[i].pSubEnv)
                {
                    iProcRet = -2;
                    break;
                }
                if (multiThreadCore.threadTask[i].pSubEnv->GetStreamStatus() == 0x00)
                {
                    iProcRet = -3;
                    break;
                }
                multiThreadCore.threadTask[i].pSubEnv->LockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
                if (multiThreadCore.threadTask[i].pSubEnv->GetLockFlag() != 0x00)
                {
                    iProcRet = -4;
                    multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
                    break;
                }
                // Try to attach this client to the existing channel
                bool assignEnv = false;
                for (int k = 0; k < MAX_BATCH_CLIENT_NUM; k++)
                {
                    if (NULL == multiThreadCore.threadTask[i].pClientConnectionPtr[k])
                    {
                        assignEnv = true;
                        multiThreadCore.threadTask[i].pClientConnectionPtr[k] = pClient;
                        _TRACE(TRACE_LOG_INFO, (char*)"GenericMediaServer::GetEnvBySuffix [%s] set [%d] to Index[%d]\n", urlSuffix, pClient->fOurSocket, k);
                        strcpy(streamName, urlSuffix);
                        break;
                    }
                }
                if (assignEnv)
                {
                    pEnv = multiThreadCore.threadTask[i].pSubEnv;
                    //multiThreadCore.threadTask[i].subSocket = pClient->fOurSocket;
                    pClient->pClientConnectionEnv = multiThreadCore.threadTask[i].pSubEnv;
                    //multiThreadCore.threadTask[i].handleDescribe = 0x01;
                    //*handleDescribe = &multiThreadCore.threadTask[i].handleDescribe;
                    if (NULL != pThreadTask) *pThreadTask = &multiThreadCore.threadTask[i];
                    multiThreadCore.threadTask[i].clientNum++;
                    pEnv->IncrementReferenceCount();    // increment the channel's reference count
                    iProcRet = 0;
                    _TRACE(TRACE_LOG_INFO, (char*)"Shared channel GenericMediaServer::GetEnvBySuffix:: Channel already exists. New Connection[%d] [%s][%s] ClientNum[%d]\n",
                           pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(), urlSuffix,
                           multiThreadCore.threadTask[i].clientNum);
                }
                else
                {
                    // No free slot found: this channel's client list is full
                    iProcRet = -10;
                    _TRACE(TRACE_LOG_ERROR, (char*)"GenericMediaServer::GetEnvBySuffix channel client list is full [%s]\n", urlSuffix);
                }
                multiThreadCore.threadTask[i].pSubEnv->UnlockEnvir("GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
                break;
            }
        }
        if (pEnv) break;          // attached to an existing channel
        if (iFreeIdx < 0) break;  // no free slot for a new channel
        if (iProcRet < 0) break;  // lookup failed with an error
        // New channel: create the worker thread on first use
        if (NULL == multiThreadCore.threadTask[iFreeIdx].osThread)
        {
            CreateOSThread(&multiThreadCore.threadTask[iFreeIdx].osThread, __WorkerThread_Proc, (void *)&multiThreadCore.threadTask[iFreeIdx]);
        }
        multiThreadCore.threadTask[iFreeIdx].pClientConnectionPtr[0] = pClient;
        pClient->pClientConnectionEnv = multiThreadCore.threadTask[iFreeIdx].pSubEnv;
#ifdef _DEBUG
        // Dump the current channel table for debugging
        for (int i = 0; i < multiThreadCore.threadNum; i++)
        {
            if ((int)strlen(multiThreadCore.threadTask[i].liveURLSuffix) > 0)
            {
                _TRACE(TRACE_LOG_DEBUG, (char *)"Channel list[%d:%s]: %s\n", i, multiThreadCore.threadTask[i].pSubEnv->GetEnvirName(), multiThreadCore.threadTask[i].liveURLSuffix);
            }
        }
#endif
        pEnv = pClient->pClientConnectionEnv;
        strcpy(multiThreadCore.threadTask[iFreeIdx].liveURLSuffix, urlSuffix);
        strcpy(streamName, multiThreadCore.threadTask[iFreeIdx].liveURLSuffix);
        pEnv->IncrementReferenceCount();    // increment the channel's reference count
        if (NULL != pThreadTask) *pThreadTask = &multiThreadCore.threadTask[iFreeIdx];
        multiThreadCore.threadTask[iFreeIdx].clientNum++;
        _TRACE(TRACE_LOG_INFO, (char*)"New channel GenericMediaServer::GetEnvBySuffix New Connection[%d] [%s][%s] ClientNum[%d]\n",
               pClient->fOurSocket, pClient->pClientConnectionEnv->GetEnvirName(),
               multiThreadCore.threadTask[iFreeIdx].liveURLSuffix,
               multiThreadCore.threadTask[iFreeIdx].clientNum);
    } while (0);
    if (bLockServerMediaSession) UnlockServerMediaSession(pMainThreadEnv->GetEnvirName(), "GenericMediaServer::GetEnvBySuffix", (unsigned long long)this);
    //UnlockClientConnection();
    if (NULL != pEnv)
    {
        if ((int)strlen(streamName) < 1)
        {
            _TRACE(TRACE_LOG_DEBUG, (char *)"#### ERROR\n");
        }
    }
    return pEnv;
}
At this point the main thread's work for the current client is complete; everything else is up to the worker thread. Once created, a worker thread continuously checks whether it has tasks to process; when a new client is assigned to it, handling resumes from handleCmd_DESCRIBE.
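The article does not show __WorkerThread_Proc itself. The following is a minimal sketch of its general shape, assuming the channel's pSubEnv/pSubScheduler pair already exists (see the note on the constructor above); the real EasyRTSPServer code would use its own UsageEnvironment subclass, the one carrying GetEnvirId(), LockEnvir() and the other extensions seen earlier:

// Per-channel worker thread (sketch). All events for this channel's clients
// are dispatched in this thread only, so the event loop itself needs no
// cross-thread locking. Sockets migrated from the main thread fire here,
// and each newly attached client is processed from handleCmd_DESCRIBE onward.
void *__WorkerThread_Proc(void *arg)
{
    LIVE_THREAD_TASK_T *task = (LIVE_THREAD_TASK_T *)arg;
    char stopWorker = 0;    // in the real code the stop condition is presumably tied to releaseChannel
    task->pSubEnv->taskScheduler().doEventLoop(&stopWorker);
    return NULL;
}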
Summary
The hand-off point between the worker threads and the main thread is RTSPServer::RTSPClientConnection::handleCmd_DESCRIBE: the main thread processes a client only up to the DESCRIBE command, and everything after that is handled entirely by the worker thread.