在前面的博客中有简单介绍MediaPlayer,最近又开始研究这块东西,在此把阅读代码的理解记录下来方便以后快速查阅。
播放普通文件传入的url是一个本地的绝对路径,但是流媒体的话传入的就是一个网络地址,如以"http://"开头的流媒体和以"rtsp://"开头的流媒体协议。
下面从源码中的Awesomeplayer.cpp finishSetDataSource_l函数入手(也相当于mediaplayer调用了prepare后,开始做一些准备工作,如音频流和视频流都要准备好)
status_t AwesomePlayer::finishSetDataSource_l() { sp<DataSource> dataSource; bool isM3u8 = false; String8 surfix; surfix.setTo(mUri.string() + (mUri.size() - 5)); isM3u8 = !strncasecmp(".m3u8", surfix, 5); if (!strncasecmp("http://", mUri.string(), 7) && (!isM3u8)) { mConnectingDataSource = new NuHTTPDataSource; mLock.unlock(); status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders); ... } else if (!strncasecmp("rtsp://", mUri.string(), 7)) { if (mLooper == NULL) { mLooper = new ALooper; mLooper->setName("rtsp"); mLooper->start(); } mRTSPController = new ARTSPController(mLooper); status_t err = mRTSPController->connect(mUri.string()); LOGI("ARTSPController::connect returned %d", err); if (err != OK) { mRTSPController.clear(); return err; } sp<MediaExtractor> extractor = mRTSPController.get(); return setDataSource_l(extractor); }
重点是在ARTSPController的connect函数
// Synchronously establishes the RTSP session: delegates the real work to a
// freshly created MyHandler and then blocks the calling thread until the
// connection attempt finishes one way or the other.
status_t ARTSPController::connect(const char *url) {
    Mutex::Autolock autoLock(mLock);

    // Only a disconnected controller may start a new connection attempt.
    if (mState != DISCONNECTED) {
        return ERROR_ALREADY_CONNECTED;
    }

    // Completion message; its target id is mReflector's handler id, so the
    // looper routes it back into this object's onMessageReceived
    // (kWhatConnectDone) once MyHandler posts it.
    sp<AMessage> msg = new AMessage(kWhatConnectDone, mReflector->id());

    mHandler = new MyHandler(url, mLooper);

    mState = CONNECTING;

    mHandler->connect(msg);

    // Block until kWhatConnectDone flips mState (to CONNECTED or
    // DISCONNECTED) and signals the condition variable.
    while (mState == CONNECTING) {
        mCondition.wait(mLock);
    }

    // On failure, drop the handler so a later connect() starts clean.
    if (mState != CONNECTED) {
        mHandler.clear();
    }

    return mConnectionResult;
}
while (mState == CONNECTING) {
mCondition.wait(mLock);
}
等待状态不再是CONNECTING(成功时变为CONNECTED,失败时变为DISCONNECTED)之后,此函数才会返回。
我们需要清楚状态是如何变化的:
首先创建了一个sp<AMessage> msg = new AMessage(kWhatConnectDone, mReflector->id()); 对象类型是kWhatConnectDone,id是mReflector->id()
这个id十分关键,它确定了此消息的最终接收对象,我们看mReflector的由来,
// Constructor: all members start in their "disconnected" state. The
// AHandlerReflector forwards any message addressed to its handler id back
// into this ARTSPController instance; registering it with the looper is
// what assigns that id (the one used by kWhatConnectDone above).
ARTSPController::ARTSPController(const sp<ALooper> &looper)
    : mState(DISCONNECTED),
      mLooper(looper),
      mSeekDoneCb(NULL),
      mSeekDoneCookie(NULL),
      mLastSeekCompletedTimeUs(-1) {
    mReflector = new AHandlerReflector<ARTSPController>(this);
    looper->registerHandler(mReflector);
}它是在ARTSPController的构造函数中创建的,并且被注册到了looper里面,这个looper不知道的可以自己看下源码,简单解释下就是一个循环的消息队列,它里面保存了一个handler列表(每个handler有唯一的id,也就是上面我们提到的id),当消息被触发时就会通过id来找到对应的handler。
紧接着我们再看MyHandler的connect函数
// Starts the asynchronous connect sequence. 'doneMsg' is the caller's
// completion message (kWhatConnectDone from ARTSPController::connect);
// it is stashed here and only posted much later, once the first access
// unit arrives.
void connect(const sp<AMessage> &doneMsg) {
    mDoneMsg = doneMsg;

    // Register ourselves and our helpers so their message ids are live.
    mLooper->registerHandler(this);
    mLooper->registerHandler(mConn);
    (1 ? mNetLooper : mLooper)->registerHandler(mRTPConn);

    // Ask the connection to forward any binary (interleaved) data it
    // observes back to us as 'biny' messages.
    sp<AMessage> notify = new AMessage('biny', id());
    mConn->observeBinaryData(notify);

    // Kick off the TCP connect; 'conn' is delivered to our
    // onMessageReceived when the attempt completes.
    sp<AMessage> reply = new AMessage('conn', id());
    mConn->connect(mOriginalSessionURL.c_str(), reply);
}它传入的参数实际就是上面的sp<AMessage> msg = new AMessage(kWhatConnectDone, mReflector->id());
也就是什么时候完成连接的动作就会触发此message
两个重要的地方
sp<AMessage> notify = new AMessage('biny', id());
mConn->observeBinaryData(notify);
// Registers 'reply' as the message to post whenever binary data is seen on
// the RTSP socket. Note the id(): kWhatObserveBinaryData is posted to this
// ARTSPConnection itself; its handler merely stores 'reply' into
// mObserveBinaryMessage for later use.
void ARTSPConnection::observeBinaryData(const sp<AMessage> &reply) {
    sp<AMessage> msg = new AMessage(kWhatObserveBinaryData, id());
    msg->setMessage("reply", reply);
    msg->post();
}此消息通过id可以看出是发送给ARTSPConnection自己的,注意setMessage参数reply,字面上理解是回复,也就是说执行完此消息后需要回复的调用reply,而这里的reply就是
AMessage('biny', id());
// Central dispatch for every AMessage addressed to this ARTSPConnection;
// each case delegates to the matching on*() worker.
void ARTSPConnection::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatConnect:
            onConnect(msg);
            break;

        case kWhatDisconnect:
            onDisconnect(msg);
            break;

        case kWhatCompleteConnection:
            onCompleteConnection(msg);
            break;

        case kWhatSendRequest:
            onSendRequest(msg);
            break;

        case kWhatReceiveResponse:
            onReceiveResponse();
            break;

        case kWhatObserveBinaryData:
        {
            // Just remember where to forward binary data; no reply here.
            CHECK(msg->findMessage("reply", &mObserveBinaryMessage));
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
这个才是连接的关键
首先reply绑定的id是MyHandler的id,也就是说最终会回到MyHandler的onMessageReceived case 'conn':中来。
// Public, non-blocking entry point: packages the url and the completion
// 'reply' into a kWhatConnect message so the actual socket work happens on
// the looper thread, in onConnect().
void ARTSPConnection::connect(const char *url, const sp<AMessage> &reply) {
    sp<AMessage> msg = new AMessage(kWhatConnect, id());
    msg->setString("url", url);
    msg->setMessage("reply", reply);
    msg->post();
}
// Looper-thread implementation of connect(): parses the rtsp:// URL,
// resolves the host, and opens a non-blocking TCP socket to the server.
// On every exit path the caller's 'reply' message is eventually posted
// with an int32 "result" (directly here, or via kWhatCompleteConnection
// when the connect is still in progress).
void ARTSPConnection::onConnect(const sp<AMessage> &msg) {
    ++mConnectionID;

    // Tear down any previous socket before reconnecting.
    if (mState != DISCONNECTED) {
        close(mSocket);
        mSocket = -1;

        flushPendingRequests();
    }

    mState = CONNECTING;

    AString url;
    CHECK(msg->findString("url", &url));

    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));

    AString host, path;
    unsigned port;
    if (!ParseURL(url.c_str(), &host, &port, &path, &mUser, &mPass)
            || (mUser.size() > 0 && mPass.size() == 0)) {
        // If we have a user name but no password we have to give up
        // right here, since we currently have no way of asking the user
        // for this information.
        LOGE("Malformed rtsp url %s", url.c_str());

        reply->setInt32("result", ERROR_MALFORMED);
        reply->post();

        mState = DISCONNECTED;
        return;
    }

    if (mUser.size() > 0) {
        LOGV("user = '%s', pass = '%s'", mUser.c_str(), mPass.c_str());
    }

    // NOTE(review): gethostbyname is a blocking DNS lookup performed on
    // the looper thread.
    struct hostent *ent = gethostbyname(host.c_str());
    if (ent == NULL) {
        LOGE("Unknown host %s", host.c_str());

        reply->setInt32("result", -ENOENT);
        reply->post();

        mState = DISCONNECTED;
        return;
    }

    mSocket = socket(AF_INET, SOCK_STREAM, 0);

    // Non-blocking so ::connect() can return EINPROGRESS and the handshake
    // completes asynchronously via kWhatCompleteConnection.
    MakeSocketBlocking(mSocket, false);

    struct sockaddr_in remote;
    memset(remote.sin_zero, 0, sizeof(remote.sin_zero));
    remote.sin_family = AF_INET;
    remote.sin_addr.s_addr = *(in_addr_t *)ent->h_addr;
    remote.sin_port = htons(port);

    int err = ::connect(
            mSocket, (const struct sockaddr *)&remote, sizeof(remote));

    reply->setInt32("server-ip", ntohl(remote.sin_addr.s_addr));

    if (err < 0) {
        if (errno == EINPROGRESS) {
            // Connection still in flight: hand the pending 'reply' to the
            // completion poller; do NOT post it yet.
            sp<AMessage> msg = new AMessage(kWhatCompleteConnection, id());
            msg->setMessage("reply", reply);
            msg->setInt32("connection-id", mConnectionID);
            msg->post();
            return;
        }

        reply->setInt32("result", -errno);
        mState = DISCONNECTED;

        close(mSocket);
        mSocket = -1;
    } else {
        reply->setInt32("result", OK);
        mState = CONNECTED;
        mNextCSeq = 1;

        // Start polling the socket for RTSP responses.
        postReceiveReponseEvent();
    }

    reply->post();
}
int err = ::connect(
mSocket, (const struct sockaddr *)&remote, sizeof(remote));
连接成功以后ARTSPConnection中的状态变为 mState = CONNECTED;
并且调用postReceiveReponseEvent函数:
// Posts a kWhatReceiveResponse message to this handler unless one is
// already pending, guaranteeing at most one poll event in the queue.
void ARTSPConnection::postReceiveReponseEvent() {
    if (mReceiveResponseEventPending) {
        return;
    }

    sp<AMessage> msg = new AMessage(kWhatReceiveResponse, id());
    msg->post();

    mReceiveResponseEventPending = true;
}发送kWhatReceiveResponse消息给自己
最终在
// Poll handler: select()s on the RTSP socket with a short timeout; when
// data is ready it reads one RTSP response (temporarily in blocking mode),
// then re-arms itself via postReceiveReponseEvent() — forming a receive
// loop that runs for as long as mState stays CONNECTED.
void ARTSPConnection::onReceiveResponse() {
    mReceiveResponseEventPending = false;

    if (mState != CONNECTED) {
        return;
    }

    struct timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = kSelectTimeoutUs;

    fd_set rs;
    FD_ZERO(&rs);
    FD_SET(mSocket, &rs);

    int res = select(mSocket + 1, &rs, NULL, NULL, &tv);
    CHECK_GE(res, 0);

    if (res == 1) {
        // Switch to blocking just for the duration of the read.
        MakeSocketBlocking(mSocket, true);

        bool success = receiveRTSPReponse();

        MakeSocketBlocking(mSocket, false);

        if (!success) {
            // Something horrible, irreparable has happened.
            flushPendingRequests();
            return;
        }
    }

    postReceiveReponseEvent();
}
当有数据过来时res == 1
就通过receiveRTSPReponse函数来接收发送过来的数据
大家可能有印象,这个函数在之前有遇到过,请查找kWhatObserveBinaryData就知道了。
postReceiveReponseEvent之后会调用 reply->post();注意这个就是前面MyHandler中发送出来的AMessage('conn', id());
此时代码将回到MyHandler的onMessageReceived case 'conn':中来。
这里有 sp<AMessage> reply = new AMessage('desc', id());
mConn->sendRequest(request.c_str(), reply);
原理同之前的一样也是发送请求并且有reply
有接触过SIP的知道有SDP这个协议,这个是在会话建立以前双方用来协商媒体信息的。
不重复解释,直接看case 'desc': 这里通过与对方的协商,回复的response code ,大家都知道如果http的话回复200则代表访问成功,这里也是一样的
我们看200里面的操作:
if (response->mStatusCode != 200) { result = UNKNOWN_ERROR; } else { mSessionDesc = new ASessionDescription; mSessionDesc->setTo( response->mContent->data(), response->mContent->size()); if (!mSessionDesc->isValid()) { LOGE("Failed to parse session description."); result = ERROR_MALFORMED; } else { ssize_t i = response->mHeaders.indexOfKey("content-base"); if (i >= 0) { mBaseURL = response->mHeaders.valueAt(i); } else { i = response->mHeaders.indexOfKey("content-location"); if (i >= 0) { mBaseURL = response->mHeaders.valueAt(i); } else { mBaseURL = mSessionURL; } } if (!mBaseURL.startsWith("rtsp://")) { // Some misbehaving servers specify a relative // URL in one of the locations above, combine // it with the absolute session URL to get // something usable... LOGW("Server specified a non-absolute base URL" ", combining it with the session URL to " "get something usable..."); AString tmp; CHECK(MakeURL( mSessionURL.c_str(), mBaseURL.c_str(), &tmp)); mBaseURL = tmp; } CHECK_GT(mSessionDesc->countTracks(), 1u); setupTrack(1); } }
然后再看setupTrack(1),这里还不确定有几个track,只是先设置track 1:
// Sets up one media track (1-based 'index' into the session description):
// creates its APacketSource, records a TrackInfo entry, then sends an RTSP
// SETUP request for the track's control URL. The 'setu' reply message
// drives the state machine to either set up the next track or fail over.
void setupTrack(size_t index) {
    sp<APacketSource> source = new APacketSource(mSessionDesc, index);

    if (source->initCheck() != OK) {
        LOGW("Unsupported format. Ignoring track #%d.", index);

        // Post a synthetic failed 'setu' so the sequencing logic still
        // advances past this track.
        sp<AMessage> reply = new AMessage('setu', id());
        reply->setSize("index", index);
        reply->setInt32("result", ERROR_UNSUPPORTED);
        reply->post();
        return;
    }

    // Resolve the track's (possibly relative) a=control URL against the
    // session base URL.
    AString url;
    CHECK(mSessionDesc->findAttribute(index, "a=control", &url));

    AString trackURL;
    CHECK(MakeURL(mBaseURL.c_str(), url.c_str(), &trackURL));

    mTracks.push(TrackInfo());
    TrackInfo *info = &mTracks.editItemAt(mTracks.size() - 1);
    info->mURL = trackURL;
    info->mPacketSource = source;
    info->mUsingInterleavedTCP = false;
    info->mFirstSeqNumInSegment = 0;
    info->mNewSegment = true;

    LOGV("track #%d URL=%s", mTracks.size(), trackURL.c_str());

    AString request = "SETUP ";
    request.append(trackURL);
    request.append(" RTSP/1.0\r\n");

    if (mTryTCPInterleaving) {
        // RTP/RTCP interleaved on the RTSP TCP connection; "sockets" are
        // channel numbers (0-1 for track one, 2-3 for track two, ...).
        size_t interleaveIndex = 2 * (mTracks.size() - 1);
        info->mUsingInterleavedTCP = true;
        info->mRTPSocket = interleaveIndex;
        info->mRTCPSocket = interleaveIndex + 1;

        request.append("Transport: RTP/AVP/TCP;interleaved=");
        request.append(interleaveIndex);
        request.append("-");
        request.append(interleaveIndex + 1);
    } else {
        // Plain UDP: allocate an adjacent RTP/RTCP port pair.
        unsigned rtpPort;
        ARTPConnection::MakePortPair(
                &info->mRTPSocket, &info->mRTCPSocket, &rtpPort);

        request.append("Transport: RTP/AVP/UDP;unicast;client_port=");
        request.append(rtpPort);
        request.append("-");
        request.append(rtpPort + 1);
    }

    request.append("\r\n");

    // Every SETUP after the first must carry the session id returned by
    // the server.
    if (index > 1) {
        request.append("Session: ");
        request.append(mSessionID);
        request.append("\r\n");
    }

    request.append("\r\n");

    sp<AMessage> reply = new AMessage('setu', id());
    reply->setSize("index", index);
    reply->setSize("track-index", mTracks.size() - 1);
    mConn->sendRequest(request.c_str(), reply);
}
根据mSessionDesc创建了一个APacketSource对象,setupTrack唯一的参数是index,此参数标记了是第几个track,这里的track实际相当于一个媒体源,音频流/视频流
当track 1设置完成后又发送setu给自己然后设置第二个track ......
设置完的track会mRTPConn->addStream(
track->mRTPSocket, track->mRTCPSocket,
mSessionDesc, index,
notify, track->mUsingInterleavedTCP);
将track的流信息加入到ARTPConnection中,ARTPConnection相当于是对外的接口
注意这里传进去的sp<AMessage> notify = new AMessage('accu', id());
notify->setSize("track-index", trackIndex);
这个也是一个回复的消息。
void ARTPConnection::addStream( int rtpSocket, int rtcpSocket, const sp<ASessionDescription> &sessionDesc, size_t index, const sp<AMessage> ¬ify, bool injected) { sp<AMessage> msg = new AMessage(kWhatAddStream, id()); msg->setInt32("rtp-socket", rtpSocket); msg->setInt32("rtcp-socket", rtcpSocket); msg->setObject("session-desc", sessionDesc); msg->setSize("index", index); msg->setMessage("notify", notify); msg->setInt32("injected", injected); msg->post(); }
// Looper-thread side of addStream(): appends a StreamInfo populated from
// the message's fields to mStreams and, for non-injected (UDP) streams,
// kicks off socket polling.
void ARTPConnection::onAddStream(const sp<AMessage> &msg) {
    mStreams.push_back(StreamInfo());
    StreamInfo *info = &*--mStreams.end();

    int32_t s;
    CHECK(msg->findInt32("rtp-socket", &s));
    info->mRTPSocket = s;
    CHECK(msg->findInt32("rtcp-socket", &s));
    info->mRTCPSocket = s;

    int32_t injected;
    CHECK(msg->findInt32("injected", &injected));

    info->mIsInjected = injected;

    sp<RefBase> obj;
    CHECK(msg->findObject("session-desc", &obj));
    info->mSessionDesc = static_cast<ASessionDescription *>(obj.get());

    CHECK(msg->findSize("index", &info->mIndex));
    // mNotifyMsg is the 'accu' message posted back to MyHandler when
    // packets arrive on this stream.
    CHECK(msg->findMessage("notify", &info->mNotifyMsg));

    info->mNumRTCPPacketsReceived = 0;
    info->mNumRTPPacketsReceived = 0;
    memset(&info->mRemoteRTCPAddr, 0, sizeof(info->mRemoteRTCPAddr));

    if (!injected) {
        postPollEvent();
    }
}
以及对info->mNotifyMsg的赋值(记住这个是AMessage('accu', id());)
在最后 调用了postPollEvent函数
这个函数实际是等待对方发来多媒体数据的:
// Periodic poll loop over every stream's RTP and RTCP sockets: select()s
// with a short timeout, receive()s whatever is readable, re-arms itself
// via postPollEvent(), and — at most every 5 seconds — sends RTCP receiver
// reports (and optionally FIRs) back to the server.
void ARTPConnection::onPollStreams() {
    mPollEventPending = false;

    if (mStreams.empty()) {
        return;
    }

    struct timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = kSelectTimeoutUs;

    fd_set rs;
    FD_ZERO(&rs);

    // Build the fd set; injected (interleaved-TCP) streams have no sockets
    // of their own to poll.
    int maxSocket = -1;
    for (List<StreamInfo>::iterator it = mStreams.begin();
         it != mStreams.end(); ++it) {
        if ((*it).mIsInjected) {
            continue;
        }

        FD_SET(it->mRTPSocket, &rs);
        FD_SET(it->mRTCPSocket, &rs);

        if (it->mRTPSocket > maxSocket) {
            maxSocket = it->mRTPSocket;
        }
        if (it->mRTCPSocket > maxSocket) {
            maxSocket = it->mRTCPSocket;
        }
    }

    if (maxSocket == -1) {
        return;
    }

    int res = select(maxSocket + 1, &rs, NULL, NULL, &tv);
    CHECK_GE(res, 0);

    if (res > 0) {
        for (List<StreamInfo>::iterator it = mStreams.begin();
             it != mStreams.end(); ++it) {
            if ((*it).mIsInjected) {
                continue;
            }

            if (FD_ISSET(it->mRTPSocket, &rs)) {
                receive(&*it, true);
            }
            if (FD_ISSET(it->mRTCPSocket, &rs)) {
                receive(&*it, false);
            }
        }
    }

    postPollEvent();

    // Rate-limited RTCP receiver-report pass (at most once per 5 s).
    int64_t nowUs = ALooper::GetNowUs();
    if (mLastReceiverReportTimeUs <= 0
            || mLastReceiverReportTimeUs + 5000000ll <= nowUs) {
        sp<ABuffer> buffer = new ABuffer(kMaxUDPSize);
        for (List<StreamInfo>::iterator it = mStreams.begin();
             it != mStreams.end(); ++it) {
            StreamInfo *s = &*it;

            if (s->mIsInjected) {
                continue;
            }

            if (s->mNumRTCPPacketsReceived == 0) {
                // We have never received any RTCP packets on this stream,
                // we don't even know where to send a report.
                continue;
            }

            buffer->setRange(0, 0);

            for (size_t i = 0; i < s->mSources.size(); ++i) {
                sp<ARTPSource> source = s->mSources.valueAt(i);

                source->addReceiverReport(buffer);

                if (mFlags & kRegularlyRequestFIR) {
                    source->addFIR(buffer);
                }
            }

            if (buffer->size() > 0) {
                LOGV("Sending RR...");

                ssize_t n = sendto(
                        s->mRTCPSocket, buffer->data(), buffer->size(), 0,
                        (const struct sockaddr *)&s->mRemoteRTCPAddr,
                        sizeof(s->mRemoteRTCPAddr));
                CHECK_EQ(n, (ssize_t)buffer->size());

                mLastReceiverReportTimeUs = nowUs;
            }
        }
    }
}
// Reads one datagram from the stream's RTP or RTCP socket and hands it to
// the matching parser. On the very first RTCP packet it also captures the
// sender's address so receiver reports can be sent back later.
status_t ARTPConnection::receive(StreamInfo *s, bool receiveRTP) {
    LOGV("receiving %s", receiveRTP ? "RTP" : "RTCP");

    CHECK(!s->mIsInjected);

    sp<ABuffer> buffer = new ABuffer(65536);

    // Non-zero only for the first RTCP packet: ask recvfrom to fill in
    // the peer address.
    socklen_t remoteAddrLen =
        (!receiveRTP && s->mNumRTCPPacketsReceived == 0)
            ? sizeof(s->mRemoteRTCPAddr) : 0;

    ssize_t nbytes = recvfrom(
            receiveRTP ? s->mRTPSocket : s->mRTCPSocket,
            buffer->data(),
            buffer->capacity(),
            0,
            remoteAddrLen > 0 ? (struct sockaddr *)&s->mRemoteRTCPAddr : NULL,
            remoteAddrLen > 0 ? &remoteAddrLen : NULL);

    if (nbytes < 0) {
        return -1;
    }

    buffer->setRange(0, nbytes);

    // LOGI("received %d bytes.", buffer->size());

    status_t err;
    if (receiveRTP) {
        err = parseRTP(s, buffer);
    } else {
        err = parseRTCP(s, buffer);
    }

    return err;
}
if (s->mNumRTPPacketsReceived++ == 0) {
sp<AMessage> notify = s->mNotifyMsg->dup();
notify->setInt32("first-rtp", true);
notify->post();
}
这就是收到的第一个rtp包,然后就触发了前面的AMessage('accu', id());消息,进而回到
MyHandler的 case 'accu':
而在这里面有这么一句话十分关键:
if (mFirstAccessUnit) {
mDoneMsg->setInt32("result", OK);
mDoneMsg->post();
mDoneMsg = NULL;
mFirstAccessUnit = false;
mFirstAccessUnitNTP = ntpTime;
}
大家或许早就忘记这个mDoneMsg是什么东西,你可以在前面的ARTSPController::connect函数中找到答案,没错!它就是kWhatConnectDone
case kWhatConnectDone: { Mutex::Autolock autoLock(mLock); CHECK(msg->findInt32("result", &mConnectionResult)); mState = (mConnectionResult == OK) ? CONNECTED : DISCONNECTED; mCondition.signal(); break; }
前面看似很简单的一个函数mRTSPController->connect(mUri.string());竟然经历了这么多才回来!!!!!!
总结一下:
1.ARTSPController主要控制流媒体的连接,断开和快进,但是真正做事的是它的成员变量MyHandler
2.MyHandler类里面的ARTSPConnection负责流媒体的收发操作。
3.ARTSPConnection首先解析URL,得到主机的地址和端口号,然后建立socket 连接到远端的主机,并且建立监听机制等待远端主机发送过来的数据
先通过sdp的形式和远端主机协商媒体类型,然后根据类型建立不同的StreamInfo包括音频和视频的(并且每一个StreamInfo都有一个rtp socket 和一个rtcp socket)
并且监听这些socket,当有第一个rtp数据传过来时,我们就认为socket连接建立并返回。
前面这些都是RTSP里面所完成的,RTSP也属于stagefright框架中的一部分,所以它的一些类都是继承的stagefright中的基本类
struct APacketSource : public MediaSource
struct ARTSPController : public MediaExtractor
ARTSPController 在调用finishSetDataSource_l后被设置为数据源。
// Binds an extractor as the player's data source: estimates the overall
// bitrate, then picks the first video/ and first audio/ track it finds and
// installs them as the video and audio sources. Fails only if neither
// kind of track exists.
status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
    // Attempt to approximate overall stream bitrate by summing all
    // tracks' individual bitrates, if not all of them advertise bitrate,
    // we have to fail.

    int64_t totalBitRate = 0;

    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        int32_t bitrate;
        if (!meta->findInt32(kKeyBitRate, &bitrate)) {
            // -1 marks "unknown overall bitrate".
            totalBitRate = -1;
            break;
        }

        totalBitRate += bitrate;
    }

    mBitrate = totalBitRate;

    LOGV("mBitrate = %lld bits/sec", mBitrate);

    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *mime;
        CHECK(meta->findCString(kKeyMIMEType, &mime));

        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
            setVideoSource(extractor->getTrack(i));
            haveVideo = true;
        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
            setAudioSource(extractor->getTrack(i));
            haveAudio = true;

            if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
                // Only do this for vorbis audio, none of the other audio
                // formats even support this ringtone specific hack and
                // retrieving the metadata on some extractors may turn out
                // to be very expensive.
                sp<MetaData> fileMeta = extractor->getMetaData();
                int32_t loop;
                if (fileMeta != NULL
                        && fileMeta->findInt32(kKeyAutoLoop, &loop)
                        && loop != 0) {
                    mFlags |= AUTO_LOOPING;
                }
            }
        }

        // Stop scanning once both kinds of track are bound.
        if (haveAudio && haveVideo) {
            break;
        }
    }

    if (!haveAudio && !haveVideo) {
        return UNKNOWN_ERROR;
    }

    mExtractorFlags = extractor->flags();

    return OK;
}并且将音频视频流都提取出来,然后
// prepareAsync work item: finishes data-source setup (which, for RTSP,
// blocks until the stream connection is established), then instantiates
// the video and audio decoders. Streaming sources (cached HTTP or RTSP)
// defer the "prepared" notification to the buffering event; everything
// else finishes prepare immediately.
void AwesomePlayer::onPrepareAsyncEvent() {
    Mutex::Autolock autoLock(mLock);

    if (mFlags & PREPARE_CANCELLED) {
        LOGI("prepare was cancelled before doing anything");
        abortPrepare(UNKNOWN_ERROR);
        return;
    }

    // A non-empty URI means the DataSource/extractor still needs to be
    // set up (this is where the whole RTSP connect sequence runs).
    if (mUri.size() > 0) {
        status_t err = finishSetDataSource_l();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mVideoTrack != NULL && mVideoSource == NULL) {
        status_t err = initVideoDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mAudioTrack != NULL && mAudioSource == NULL) {
        status_t err = initAudioDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mCachedSource != NULL || mRTSPController != NULL) {
        postBufferingEvent_l();
    } else {
        finishAsyncPrepare_l();
    }
}
后面用户调用play实际就是开始接收数据,解码,然后播放渲染。