----------------------------------------------------------------------------------------------------------------------------------------
Set up an rtmpd server in one minute: https://blog.csdn.net/freeabc/article/details/102880984
Software download: http://www.qiyicc.com/download/rtmpd.rar
GitHub: https://github.com/superconvert/smart_rtmpd
-----------------------------------------------------------------------------------------------------------------------------------------
//*************************************************************************************************
//
// Flow analysis: what the receiver does after getting the peer's offer
//
//*************************************************************************************************
1. The offer from the peer arrives ( WebSocket signaling )
WebSocketRTCClient.java
void WebSocketRTCClient::onWebSocketMessage(final String msg)
// The offer message is received
if (type.equals("offer")) {
if (!initiator) {
SessionDescription sdp = new SessionDescription(
SessionDescription.Type.fromCanonicalForm(type), json.getString("sdp"));
events.onRemoteDescription(sdp);
} else {
reportError("Received offer for call receiver: " + msg);
}
}
2. The callback lands in this function
CallActivity.java
void CallActivity::onRemoteDescription(final SessionDescription sdp)
// See step 3 below for the analysis of this call
peerConnectionClient.setRemoteDescription(sdp);
if (!signalingParameters.initiator) {
// See step 4 below for the analysis of this call
peerConnectionClient.createAnswer();
}
3. Analysis of peerConnectionClient.setRemoteDescription: it mainly creates the transport objects and all the components on the
pipeline ( socket --> jitterbuffer --> decoder --> render ), and manages the relationships between these components
// Java
void PeerConnectionClient::setRemoteDescription(final SessionDescription sdp)
peerConnection.setRemoteDescription(sdpObserver, sdpRemote);
PeerConnection::setRemoteDescription(SdpObserver observer, SessionDescription sdp)
nativeSetRemoteDescription(observer, sdp);
// JNI C++
JNI_GENERATOR_EXPORT void Java_org_webrtc_PeerConnection_nativeSetRemoteDescription(
JNIEnv* env,
jobject jcaller,
jobject observer,
jobject sdp) {
return JNI_PeerConnection_SetRemoteDescription(env, base::android::JavaParamRef<jobject>(env,
jcaller), base::android::JavaParamRef<jobject>(env, observer),
base::android::JavaParamRef<jobject>(env, sdp));
}
3.1
./sdk/android/src/jni/pc/peer_connection.cc
static void JNI_PeerConnection_SetRemoteDescription(
JNIEnv* jni,
const JavaParamRef<jobject>& j_pc,
const JavaParamRef<jobject>& j_observer,
const JavaParamRef<jobject>& j_sdp) {
rtc::scoped_refptr<SetSdpObserverJni> observer(
new rtc::RefCountedObject<SetSdpObserverJni>(jni, j_observer, nullptr));
ExtractNativePC(jni, j_pc)->SetRemoteDescription(
observer, JavaToNativeSessionDescription(jni, j_sdp).release());
}
3.2
./pc/peer_connection.cc
void PeerConnection::SetRemoteDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc_ptr)
this_weak_ptr->DoSetRemoteDescription(std::move(desc),
rtc::scoped_refptr<SetRemoteDescriptionObserverInterface>(
new SetRemoteDescriptionObserverAdapter(this_weak_ptr.get(), std::move(observer_refptr))));
3.3
./pc/peer_connection.cc
void PeerConnection::DoSetRemoteDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
rtc::scoped_refptr<SetRemoteDescriptionObserverInterface> observer)
error = ApplyRemoteDescription(std::move(desc));
observer->OnSetRemoteDescriptionComplete(RTCError::OK());
3.4
./pc/peer_connection.cc
RTCError PeerConnection::ApplyRemoteDescription(
std::unique_ptr<SessionDescriptionInterface> desc)
// See step 3.4.1
RTCError error = PushdownTransportDescription(cricket::CS_REMOTE, type);
// See step 3.4.2
RTCError error = CreateChannels(*remote_description()->description());
// See step 3.4.3
error = UpdateSessionState(type, cricket::CS_REMOTE, remote_description()->description());
3.4.1 PushdownTransportDescription
RTCError PeerConnection::PushdownTransportDescription(
cricket::ContentSource source, SdpType type) {
if (source == cricket::CS_LOCAL) {
const SessionDescriptionInterface* sdesc = local_description();
RTC_DCHECK(sdesc);
return transport_controller_->SetLocalDescription(type, sdesc->description());
} else {
// This is the branch taken here (source == CS_REMOTE)
const SessionDescriptionInterface* sdesc = remote_description();
RTC_DCHECK(sdesc);
return transport_controller_->SetRemoteDescription(type, sdesc->description());
}
}
./pc/jsep_transport_controller.cc
RTCError JsepTransportController::SetRemoteDescription(
SdpType type, const cricket::SessionDescription* description)
return ApplyDescription_n(/*local=*/false, type, description);
RTCError JsepTransportController::ApplyDescription_n(
bool local, SdpType type, const cricket::SessionDescription* description)
for (const cricket::ContentInfo& content_info : description->contents()) {
// Don't create transports for rejected m-lines and bundled m-lines.
if (content_info.rejected ||
(IsBundled(content_info.name) && content_info.name != *bundled_mid())) {
continue;
}
// See the analysis of JsepTransportController::MaybeCreateJsepTransport
// in the post https://blog.csdn.net/freeabc/article/details/106287318
// It essentially creates the underlying transport object and wires up the relationships between the transports
error = MaybeCreateJsepTransport(local, content_info, *description);
if (!error.ok()) {
return error;
}
}
for (size_t i = 0; i < description->contents().size(); ++i) {
SetIceRole_n(DetermineIceRole(transport, transport_info, type, local));
transport->SetRemoteJsepTransportDescription(jsep_description, type);
}
3.4.2 CreateChannels creates the audio and video channels
// See the post https://blog.csdn.net/freeabc/article/details/106287318
RTCError PeerConnection::CreateChannels(const SessionDescription& desc)
// Creates a VideoChannel; the VideoChannel's media_channel is the WebRtcVideoChannel
cricket::VideoChannel* video_channel = CreateVideoChannel(video->name);
if (!video_channel) {
LOG_AND_RETURN_ERROR(RTCErrorType::INTERNAL_ERROR, "Failed to create video channel.");
}
// Bind the RtpTransceiver to the VideoChannel
GetVideoTransceiver()->internal()->SetChannel(video_channel);
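The ownership relationship set up in 3.4.2 can be reduced to the small standalone sketch below; the types (MediaChannel, VideoChannel, RtpTransceiver) are simplified stand-ins for cricket::WebRtcVideoChannel, cricket::VideoChannel and the real transceiver, only to show that the transceiver holds the channel and the channel owns the media channel that later receives the AddRecvStream calls of step 3.4.3:
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

struct MediaChannel {                       // stand-in for cricket::WebRtcVideoChannel
  void AddRecvStream(uint32_t ssrc) { std::cout << "recv stream, ssrc=" << ssrc << "\n"; }
};

class VideoChannel {                        // stand-in for cricket::VideoChannel
 public:
  explicit VideoChannel(std::string mid) : mid_(std::move(mid)) {}
  MediaChannel* media_channel() { return &media_channel_; }
 private:
  std::string mid_;
  MediaChannel media_channel_;
};

class RtpTransceiver {                      // stand-in for RtpTransceiver::internal()
 public:
  void SetChannel(VideoChannel* channel) { channel_ = channel; }
  VideoChannel* channel() { return channel_; }
 private:
  VideoChannel* channel_ = nullptr;
};

int main() {
  RtpTransceiver transceiver;
  auto video_channel = std::make_unique<VideoChannel>("video0");  // CreateVideoChannel(video->name)
  transceiver.SetChannel(video_channel.get());                    // SetChannel(video_channel)
  // Later, pushing the remote description down reaches the media channel (step 3.4.3):
  transceiver.channel()->media_channel()->AddRecvStream(123456);
  return 0;
}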
3.4.3 UpdateSessionState mainly updates the channels and wires up the links of each pipeline
// See the post https://blog.csdn.net/freeabc/article/details/106287318
./pc/peer_connection.cc
RTCError PeerConnection::UpdateSessionState(SdpType type, cricket::ContentSource source,
const cricket::SessionDescription* description)
error = PushdownMediaDescription(type, source);
RTCError PeerConnection::PushdownMediaDescription(SdpType type, cricket::ContentSource source)
for (const auto& transceiver : transceivers_) {
const ContentInfo* content_info = FindMediaSectionForTransceiver(transceiver, sdesc);
cricket::ChannelInterface* channel = transceiver->internal()->channel();
const MediaContentDescription* content_desc = content_info->media_description();
bool success = (source == cricket::CS_LOCAL)
? channel->SetLocalContent(content_desc, type, &error)
: channel->SetRemoteContent(content_desc, type, &error);
./pc/channel.cc
bool BaseChannel::SetRemoteContent(const MediaContentDescription* content,
SdpType type, std::string* error_desc) {
TRACE_EVENT0("webrtc", "BaseChannel::SetRemoteContent");
return InvokeOnWorker(RTC_FROM_HERE,
Bind(&BaseChannel::SetRemoteContent_w, this, content, type, error_desc));
}
bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
SdpType type, std::string* error_desc)
if (!UpdateRemoteStreams_w(video->streams(), type, error_desc)) {
SafeSetError("Failed to set remote video description streams.", error_desc);
return false;
}
bool BaseChannel::UpdateRemoteStreams_w(const std::vector<StreamParams>& streams,
SdpType type, std::string* error_desc)
for (const StreamParams& new_stream : streams) {
// We allow a StreamParams with an empty list of SSRCs, in which case the
// MediaChannel will cache the parameters and use them for any unsignaled
// stream received later.
if ((!new_stream.has_ssrcs() && !HasStreamWithNoSsrcs(remote_streams_)) ||
!GetStreamBySsrc(remote_streams_, new_stream.first_ssrc())) {
// See step 3.4.3.1
if (AddRecvStream_w(new_stream)) {
}
}
}
// See step 3.4.3.2; this is what routes the RTP data received by the lower layer into this channel
RegisterRtpDemuxerSink();
remote_streams_ = streams;
3.4.3.1
./pc/channel.cc
bool BaseChannel::AddRecvStream_w(const StreamParams& sp) {
RTC_DCHECK(worker_thread() == rtc::Thread::Current());
// media_channel is the WebRtcVideoChannel object
// see step 22 in the post
// https://blog.csdn.net/freeabc/article/details/106287318
return media_channel()->AddRecvStream(sp);
}
./media/engine/webrtc_video_engine.cc
bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp) {
return AddRecvStream(sp, false);
}
bool WebRtcVideoChannel::AddRecvStream(const StreamParams& sp,
bool default_stream)
receive_streams_[ssrc] = new WebRtcVideoReceiveStream(
this, call_, sp, std::move(config), decoder_factory_, default_stream,
recv_codecs_, flexfec_config);
For an in-depth analysis of WebRtcVideoReceiveStream, see the section below,
<<Analysis of the path from the jitterbuffer to the decoder after video data is received>>
At this point the receive_streams_ entry of WebRtcVideoChannel exists; it is the WebRtcVideoReceiveStream
3.4.3.2
See step 10.3.3 in the post https://blog.csdn.net/freeabc/article/details/106142951
The main point: the VideoChannel is added as a sink to the rtp_demuxer_ inside RtpTransport, so RtpDemuxer::OnRtpPacket
calls VideoChannel::OnRtpPacket. This is what links the components of the
pipeline ( socket ---> jitterbuffer ---> decoder ---> render ) together
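To make the sink registration concrete, here is a minimal standalone sketch of the demux-by-SSRC pattern (simplified types, not the real RtpDemuxer/RtpTransport API): the channel registers itself as the sink for its SSRC, and every RTP packet arriving on the transport is forwarded to the matching sink.
#include <cstdint>
#include <iostream>
#include <map>

struct RtpPacket {
  uint32_t ssrc;
  // payload omitted
};

class RtpPacketSink {                       // the role VideoChannel plays
 public:
  virtual ~RtpPacketSink() = default;
  virtual void OnRtpPacket(const RtpPacket& packet) = 0;
};

class RtpDemuxer {                          // stand-in for rtp_demuxer_ in RtpTransport
 public:
  void AddSink(uint32_t ssrc, RtpPacketSink* sink) { sinks_[ssrc] = sink; }
  void OnRtpPacket(const RtpPacket& packet) {
    auto it = sinks_.find(packet.ssrc);
    if (it != sinks_.end())
      it->second->OnRtpPacket(packet);      // RtpDemuxer::OnRtpPacket -> VideoChannel::OnRtpPacket
  }
 private:
  std::map<uint32_t, RtpPacketSink*> sinks_;
};

class VideoChannelSink : public RtpPacketSink {
 public:
  void OnRtpPacket(const RtpPacket& packet) override {
    std::cout << "channel got packet for ssrc " << packet.ssrc << "\n";
    // from here the packet flows on: jitterbuffer ---> decoder ---> render
  }
};

int main() {
  RtpDemuxer demuxer;
  VideoChannelSink channel;
  demuxer.AddSink(123456, &channel);        // what RegisterRtpDemuxerSink() boils down to
  demuxer.OnRtpPacket({123456});
  return 0;
}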
4. Analysis of peerConnectionClient.createAnswer
void PeerConnectionClient::createAnswer() {
peerConnection.createAnswer(sdpObserver, sdpMediaConstraints);
}
void PeerConnection::createAnswer(SdpObserver observer, MediaConstraints constraints) {
nativeCreateAnswer(observer, constraints);
}
JNI_GENERATOR_EXPORT void Java_org_webrtc_PeerConnection_nativeCreateAnswer(
JNIEnv* env,
jobject jcaller,
jobject observer,
jobject constraints) {
return JNI_PeerConnection_CreateAnswer(env, base::android::JavaParamRef<jobject>(env, jcaller),
base::android::JavaParamRef<jobject>(env, observer), base::android::JavaParamRef<jobject>(env, constraints));
}
./sdk/android/src/jni/pc/peer_connection.cc
void JNI_PeerConnection_CreateAnswer(
JNIEnv* jni,
const JavaParamRef<jobject>& j_pc,
const JavaParamRef<jobject>& j_observer,
const JavaParamRef<jobject>& j_constraints) {
std::unique_ptr<MediaConstraints> constraints =
JavaToNativeMediaConstraints(jni, j_constraints);
rtc::scoped_refptr<CreateSdpObserverJni> observer(
new rtc::RefCountedObject<CreateSdpObserverJni>(jni, j_observer, std::move(constraints)));
PeerConnectionInterface::RTCOfferAnswerOptions options;
CopyConstraintsIntoOfferAnswerOptions(observer->constraints(), &options);
ExtractNativePC(jni, j_pc)->CreateAnswer(observer, options);
}
./pc/peer_connection.cc
void PeerConnection::CreateAnswer(CreateSessionDescriptionObserver* observer,
const RTCOfferAnswerOptions& options)
this_weak_ptr->DoCreateAnswer(options, observer_wrapper);
void PeerConnection::DoCreateAnswer(
const RTCOfferAnswerOptions& options,
rtc::scoped_refptr<CreateSessionDescriptionObserver> observer)
webrtc_session_desc_factory_->CreateAnswer(observer, session_options);
./pc/webrtc_session_description_factory.cc
void WebRtcSessionDescriptionFactory::CreateAnswer(
CreateSessionDescriptionObserver* observer,
const cricket::MediaSessionOptions& session_options)
InternalCreateAnswer(request);
void WebRtcSessionDescriptionFactory::InternalCreateAnswer(
CreateSessionDescriptionRequest request)
PostCreateSessionDescriptionSucceeded(request.observer, std::move(answer));
// The corresponding SDP is generated and handed to the upper layer so it can do its preparation work.
// For the subsequent flow, see https://blog.csdn.net/freeabc/article/details/106287318
// That flow is mostly initialization: the network, plus creation of the mid-layer channel, receiver and sender objects, etc.
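Putting steps 1-4 together, the callee-side sequence reduces to the sketch below. The types here are hypothetical stand-ins (not the real PeerConnection API); the point is only the order of operations: apply the remote offer, build the answer from it, and send the answer back over the signaling channel.
#include <iostream>
#include <string>

struct SessionDescription { std::string type; std::string sdp; };

class PeerConnectionSketch {
 public:
  void SetRemoteDescription(const SessionDescription& desc) { remote_ = desc; }
  SessionDescription CreateAnswer() const {
    // The real WebRtcSessionDescriptionFactory builds the answer from the remote
    // offer plus the local transceivers and codecs; here we just tag it.
    return {"answer", "v=0 ... (built against: " + remote_.sdp + ")"};
  }
 private:
  SessionDescription remote_;
};

void SendOverWebSocket(const SessionDescription& desc) {
  std::cout << "signaling -> {type: " << desc.type << "}\n";
}

int main() {
  PeerConnectionSketch pc;
  SessionDescription offer{"offer", "v=0 ..."};    // arrived via WebSocketRTCClient (step 1)
  pc.SetRemoteDescription(offer);                  // step 3: builds transports and channels
  SessionDescription answer = pc.CreateAnswer();   // step 4: produces the local SDP
  SendOverWebSocket(answer);                       // back to the caller
  return 0;
}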
//*************************************************************************************************
//
// Analysis of the path from the jitterbuffer to the decoder after video data is received
//
//*************************************************************************************************
1. We analyze the constructor of WebRtcVideoReceiveStream
WebRtcVideoChannel::WebRtcVideoReceiveStream::WebRtcVideoReceiveStream(
WebRtcVideoChannel* channel,
webrtc::Call* call,
const StreamParams& sp,
webrtc::VideoReceiveStream::Config config,
webrtc::VideoDecoderFactory* decoder_factory,
bool default_stream,
const std::vector<VideoCodecSettings>& recv_codecs,
const webrtc::FlexfecReceiveStream::Config& flexfec_config)
: channel_(channel),
call_(call),
stream_params_(sp),
stream_(NULL),
default_stream_(default_stream),
config_(std::move(config)),
flexfec_config_(flexfec_config),
flexfec_stream_(nullptr),
decoder_factory_(decoder_factory),
sink_(NULL),
first_frame_timestamp_(-1),
estimated_remote_start_ntp_time_ms_(0) {
config_.renderer = this;
ConfigureCodecs(recv_codecs);
ConfigureFlexfecCodec(flexfec_config.payload_type);
MaybeRecreateWebRtcFlexfecStream();
// See step 2 below
RecreateWebRtcVideoStream();
}
2.
./media/engine/webrtc_video_engine.cc
void WebRtcVideoChannel::WebRtcVideoReceiveStream::RecreateWebRtcVideoStream() {
absl::optional<int> base_minimum_playout_delay_ms;
if (stream_) {
base_minimum_playout_delay_ms = stream_->GetBaseMinimumPlayoutDelayMs();
MaybeDissociateFlexfecFromVideo();
call_->DestroyVideoReceiveStream(stream_);
stream_ = nullptr;
}
webrtc::VideoReceiveStream::Config config = config_.Copy();
config.rtp.protected_by_flexfec = (flexfec_stream_ != nullptr);
config.stream_id = stream_params_.id;
// A VideoReceiveStream object is created here; see step 3 below
stream_ = call_->CreateVideoReceiveStream(std::move(config));
if (base_minimum_playout_delay_ms) {
stream_->SetBaseMinimumPlayoutDelayMs(base_minimum_playout_delay_ms.value());
}
MaybeAssociateFlexfecWithVideo();
// See step 4 below for this call
stream_->Start();
if (webrtc::field_trial::IsEnabled(
"WebRTC-Video-BufferPacketsWithUnknownSsrc")) {
channel_->BackfillBufferedPackets(stream_params_.ssrcs);
}
}
3. In the end a VideoReceiveStream object is created
./call/call.cc
webrtc::VideoReceiveStream* Call::CreateVideoReceiveStream(
webrtc::VideoReceiveStream::Config configuration) {
TRACE_EVENT0("webrtc", "Call::CreateVideoReceiveStream");
RTC_DCHECK_RUN_ON(&configuration_sequence_checker_);
receive_side_cc_.SetSendPeriodicFeedback(
SendPeriodicFeedback(configuration.rtp.extensions));
RegisterRateObserver();
VideoReceiveStream* receive_stream = new VideoReceiveStream(
task_queue_factory_, &video_receiver_controller_, num_cpu_cores_,
transport_send_ptr_->packet_router(), std::move(configuration),
module_process_thread_.get(), call_stats_.get(), clock_);
const webrtc::VideoReceiveStream::Config& config = receive_stream->config();
{
WriteLockScoped write_lock(*receive_crit_);
if (config.rtp.rtx_ssrc) {
// We record identical config for the rtx stream as for the main
// stream. Since the transport_send_cc negotiation is per payload
// type, we may get an incorrect value for the rtx stream, but
// that is unlikely to matter in practice.
receive_rtp_config_.emplace(config.rtp.rtx_ssrc,
ReceiveRtpConfig(config));
}
receive_rtp_config_.emplace(config.rtp.remote_ssrc,
ReceiveRtpConfig(config));
video_receive_streams_.insert(receive_stream);
ConfigureSync(config.sync_group);
}
receive_stream->SignalNetworkState(video_network_state_);
UpdateAggregateNetworkState();
event_log_->Log(std::make_unique<RtcEventVideoReceiveStreamConfig>(
CreateRtcLogStreamConfig(config)));
return receive_stream;
}
4. VideoReceiveStream::Start is essentially the initialization of the jitterbuffer
./video/video_receive_stream.cc
void VideoReceiveStream::Start()
// See step 4.1 for the jitterbuffer initialization
frame_buffer_->Start();
// This kicks off the jitterbuffer processing
decode_queue_.PostTask([this] {
RTC_DCHECK_RUN_ON(&decode_queue_);
decoder_stopped_ = false;
// See step 5 below; this posts a task that pulls a frame out of the jitterbuffer
StartNextDecode();
});
4.1 The jitter buffer is created in the VideoReceiveStream constructor
./video/video_receive_stream.cc
VideoReceiveStream::VideoReceiveStream(
TaskQueueFactory* task_queue_factory,
RtpStreamReceiverControllerInterface* receiver_controller,
int num_cpu_cores,
PacketRouter* packet_router,
VideoReceiveStream::Config config,
ProcessThread* process_thread,
CallStats* call_stats,
Clock* clock,
VCMTiming* timing)
// ./modules/video_coding/frame_buffer2.cc
frame_buffer_.reset(new video_coding::FrameBuffer(clock_, timing_.get(), &stats_proxy_));
5. Analysis of StartNextDecode
./video/video_receive_stream.cc
void VideoReceiveStream::StartNextDecode() {
TRACE_EVENT0("webrtc", "VideoReceiveStream::StartNextDecode");
// See step 6
frame_buffer_->NextFrame(
GetWaitMs(), keyframe_required_, &decode_queue_,
/* encoded frame handler */
[this](std::unique_ptr<EncodedFrame> frame, ReturnReason res) {
RTC_DCHECK_EQ(frame == nullptr, res == ReturnReason::kTimeout);
RTC_DCHECK_EQ(frame != nullptr, res == ReturnReason::kFrameFound);
decode_queue_.PostTask([this, frame = std::move(frame)]() mutable {
RTC_DCHECK_RUN_ON(&decode_queue_);
if (decoder_stopped_)
return;
if (frame) {
// Start decoding; see step 10 below, where the data is handed to the decoder
HandleEncodedFrame(std::move(frame));
} else {
HandleFrameBufferTimeout();
}
StartNextDecode();
});
}
);
}
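The essence of StartNextDecode is a pull loop: the completion handler decodes the frame it was given and immediately asks the frame buffer for the next one. A minimal standalone sketch of that loop (plain callbacks and a std::queue stand in for WebRTC's FrameBuffer, decode_queue_ and TaskQueue; all names here are simplified):
#include <functional>
#include <iostream>
#include <optional>
#include <queue>

class FakeFrameBuffer {
 public:
  void Insert(int frame) { frames_.push(frame); }
  // NextFrame: hand one frame to the handler, or nullopt on "timeout".
  void NextFrame(std::function<void(std::optional<int>)> handler) {
    if (frames_.empty()) { handler(std::nullopt); return; }
    int f = frames_.front();
    frames_.pop();
    handler(f);
  }
 private:
  std::queue<int> frames_;
};

FakeFrameBuffer buffer;
int frames_left = 3;

void StartNextDecode() {
  buffer.NextFrame([](std::optional<int> frame) {
    if (frame) {
      std::cout << "decode frame " << *frame << "\n";   // HandleEncodedFrame(...)
    } else {
      std::cout << "timeout, maybe request keyframe\n"; // HandleFrameBufferTimeout()
    }
    if (--frames_left > 0)
      StartNextDecode();  // re-post: keep the pull-decode loop running
  });
}

int main() {
  buffer.Insert(1);
  buffer.Insert(2);
  StartNextDecode();
  return 0;
}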
6. The jitterbuffer's NextFrame function
./modules/video_coding/frame_buffer2.cc
void FrameBuffer::NextFrame(
int64_t max_wait_time_ms,
bool keyframe_required,
rtc::TaskQueue* callback_queue,
std::function<void(std::unique_ptr<EncodedFrame>, ReturnReason)> handler) {
RTC_DCHECK_RUN_ON(callback_queue);
TRACE_EVENT0("webrtc", "FrameBuffer::NextFrame");
int64_t latest_return_time_ms =
clock_->TimeInMilliseconds() + max_wait_time_ms;
rtc::CritScope lock(&crit_);
if (stopped_) {
return;
}
latest_return_time_ms_ = latest_return_time_ms;
keyframe_required_ = keyframe_required;
// handler is the lambda shown above (step 5)
frame_handler_ = handler;
callback_queue_ = callback_queue;
// See step 7 below
StartWaitForNextFrameOnQueue();
}
7.
./modules/video_coding/frame_buffer2.cc
void FrameBuffer::StartWaitForNextFrameOnQueue() {
RTC_DCHECK(callback_queue_);
RTC_DCHECK(!callback_task_.Running());
// This takes a frame out of the receive queue frames_ and moves it into the to-be-decoded queue; see step 8
int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds());
callback_task_ = RepeatingTaskHandle::DelayedStart(
callback_queue_->Get(), TimeDelta::ms(wait_ms), [this] {
// If this task has not been cancelled, we did not get any new frames
// while waiting. Continue with frame delivery.
rtc::CritScope lock(&crit_);
if (!frames_to_decode_.empty()) {
// This first calls GetNextFrame (step 9), then invokes the lambda above (step 10)
// We have frames, deliver!
frame_handler_(absl::WrapUnique(GetNextFrame()), kFrameFound);
CancelCallback();
return TimeDelta::Zero(); // Ignored.
} else if (clock_->TimeInMilliseconds() >= latest_return_time_ms_) {
// We have timed out, signal this and stop repeating.
frame_handler_(nullptr, kTimeout);
CancelCallback();
return TimeDelta::Zero(); // Ignored.
} else {
// If there's no frames to decode and there is still time left, it
// means that the frame buffer was cleared between creation and
// execution of this task. Continue waiting for the remaining time.
int64_t wait_ms = FindNextFrame(clock_->TimeInMilliseconds());
return TimeDelta::ms(wait_ms);
}
}
);
}
./rtc_base/task_utils/repeating_task.h
template <class Closure>
static RepeatingTaskHandle DelayedStart(TaskQueueBase* task_queue,
TimeDelta first_delay,
Closure&& closure) {
auto repeating_task = std::make_unique<webrtc_repeating_task_impl::RepeatingTaskImpl<Closure>>(
task_queue, first_delay, std::forward<Closure>(closure));
auto* repeating_task_ptr = repeating_task.get();
task_queue->PostDelayedTask(std::move(repeating_task), first_delay.ms());
return RepeatingTaskHandle(repeating_task_ptr);
}
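RepeatingTaskHandle::DelayedStart is what keeps the wait in step 7 alive: the closure runs after the initial delay, and the TimeDelta it returns is the delay before the next run, until the repetition is cancelled. Below is a minimal standalone sketch of that contract; std::thread and sleep_for stand in for WebRTC's task queue, and the stop flag stands in for CancelCallback():
#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

using Millis = std::chrono::milliseconds;

// Runs `closure` after `first_delay`, then keeps repeating with whatever delay the
// closure returns, until the closure sets the stop flag.
void DelayedStart(Millis first_delay, std::function<Millis(bool& stop)> closure) {
  std::thread([first_delay, closure] {
    Millis delay = first_delay;
    bool stop = false;
    while (!stop) {
      std::this_thread::sleep_for(delay);
      delay = closure(stop);
    }
  }).detach();
}

int main() {
  int runs = 0;
  DelayedStart(Millis(10), [&runs](bool& stop) {
    std::cout << "tick " << ++runs << "\n";  // e.g. check frames_to_decode_
    if (runs == 3) stop = true;              // equivalent of CancelCallback()
    return Millis(20);                       // wait_ms until the next attempt
  });
  std::this_thread::sleep_for(Millis(200));  // keep main alive for the demo
  return 0;
}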
8.
int64_t FrameBuffer::FindNextFrame(int64_t now_ms) {
// frames_ is the receive queue
for (auto frame_it = frames_.begin();
frame_it != frames_.end() && frame_it->first <= last_continuous_frame_;
++frame_it) {
EncodedFrame* frame = frame_it->second.frame.get();
std::vector<FrameMap::iterator> current_superframe;
current_superframe.push_back(frame_it);
bool last_layer_completed = frame_it->second.frame->is_last_spatial_layer;
FrameMap::iterator next_frame_it = frame_it;
while (true) {
++next_frame_it;
if (next_frame_it == frames_.end() ||
next_frame_it->first.picture_id != frame->id.picture_id ||
!next_frame_it->second.continuous) {
break;
}
// Check if the next frame has some undecoded references other than
// the previous frame in the same superframe.
size_t num_allowed_undecoded_refs =
(next_frame_it->second.frame->inter_layer_predicted) ? 1 : 0;
if (next_frame_it->second.num_missing_decodable >
num_allowed_undecoded_refs) {
break;
}
// All frames in the superframe should have the same timestamp.
if (frame->Timestamp() != next_frame_it->second.frame->Timestamp()) {
RTC_LOG(LS_WARNING) << "Frames in a single superframe have different"
" timestamps. Skipping undecodable superframe.";
break;
}
// Collect this frame into the superframe
current_superframe.push_back(next_frame_it);
last_layer_completed = next_frame_it->second.frame->is_last_spatial_layer;
}
}
// The to-be-decoded queue
frames_to_decode_ = std::move(current_superframe);
}
// Where the data in frames_ comes from:
// from the post https://blog.csdn.net/freeabc/article/details/106142951 we know that received video streams end up calling
int64_t FrameBuffer::InsertFrame(std::unique_ptr<EncodedFrame> frame)
// Add the frame to this queue
auto info = frames_.emplace(id, FrameInfo()).first;
// The received video data
info->second.frame = std::move(frame);
// Signal that a new continuous frame is available
new_continuous_frame_event_.Set();
// Post another task that runs StartWaitForNextFrameOnQueue again
if (callback_queue_) {
callback_queue_->PostTask([this] {
rtc::CritScope lock(&crit_);
if (!callback_task_.Running())
return;
RTC_CHECK(frame_handler_);
callback_task_.Stop();
StartWaitForNextFrameOnQueue();
});
}
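The interaction between InsertFrame (producer side, fed by the RTP receiver) and the waiting NextFrame task (consumer side, the decode queue) can be reduced to the sketch below. It is a simplified standalone model, not the real FrameBuffer: an ordered map keyed by picture id, a wait with a deadline, and a wake-up when a new frame is inserted (the condition variable stands in for the PostTask / StartWaitForNextFrameOnQueue round trip above).
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <map>
#include <mutex>
#include <optional>
#include <string>
#include <thread>

class MiniFrameBuffer {
 public:
  void InsertFrame(int64_t picture_id, std::string payload) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      frames_.emplace(picture_id, std::move(payload));   // frames_.emplace(id, FrameInfo())
    }
    new_frame_.notify_one();                              // new_continuous_frame_event_.Set()
  }

  // NextFrame: wait up to max_wait for the lowest-id frame, like FindNextFrame + GetNextFrame.
  std::optional<std::string> NextFrame(std::chrono::milliseconds max_wait) {
    std::unique_lock<std::mutex> lock(mutex_);
    new_frame_.wait_for(lock, max_wait, [this] { return !frames_.empty(); });
    if (frames_.empty()) return std::nullopt;             // timeout -> HandleFrameBufferTimeout
    auto it = frames_.begin();
    std::string frame = std::move(it->second);
    frames_.erase(it);
    return frame;                                         // -> HandleEncodedFrame
  }

 private:
  std::mutex mutex_;
  std::condition_variable new_frame_;
  std::map<int64_t, std::string> frames_;                 // ordered by picture id
};

int main() {
  MiniFrameBuffer buffer;
  std::thread network([&] { buffer.InsertFrame(7, "frame-7"); });  // RTP receiver side
  auto frame = buffer.NextFrame(std::chrono::milliseconds(100));   // decode-queue side
  std::cout << (frame ? *frame : std::string("timeout")) << "\n";
  network.join();
  return 0;
}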
9.
EncodedFrame* FrameBuffer::GetNextFrame() {
std::vector<EncodedFrame*> frames_out;
for (FrameMap::iterator& frame_it : frames_to_decode_) {
EncodedFrame* frame = frame_it->second.frame.release();
frames_out.push_back(frame);
}
UpdateJitterDelay();
UpdateTimingFrameInfo();
return CombineAndDeleteFrames(frames_out);
}
10.
./video/video_receive_stream.cc
void VideoReceiveStream::HandleEncodedFrame(std::unique_ptr<EncodedFrame> frame) {
int64_t now_ms = clock_->TimeInMilliseconds();
// Current OnPreDecode only cares about QP for VP8.
int qp = -1;
if (frame->CodecSpecific()->codecType == kVideoCodecVP8) {
if (!vp8::GetQp(frame->data(), frame->size(), &qp)) {
RTC_LOG(LS_WARNING) << "Failed to extract QP from VP8 video frame";
}
}
stats_proxy_.OnPreDecode(frame->CodecSpecific()->codecType, qp);
HandleKeyFrameGeneration(frame->FrameType() == VideoFrameType::kVideoFrameKey,
now_ms);
// See step 10.1 for the analysis
int decode_result = video_receiver_.Decode(frame.get());
if (decode_result == WEBRTC_VIDEO_CODEC_OK ||
decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME) {
keyframe_required_ = false;
frame_decoded_ = true;
rtp_video_stream_receiver_.FrameDecoded(frame->id.picture_id);
if (decode_result == WEBRTC_VIDEO_CODEC_OK_REQUEST_KEYFRAME)
RequestKeyFrame(now_ms);
} else if (!frame_decoded_ || !keyframe_required_ ||
(last_keyframe_request_ms_ + max_wait_for_keyframe_ms_ < now_ms)) {
keyframe_required_ = true;
// TODO(philipel): Remove this keyframe request when downstream project
// has been fixed.
RequestKeyFrame(now_ms);
}
if (encoded_frame_buffer_function_) {
frame->Retain();
encoded_frame_buffer_function_(WebRtcRecordableEncodedFrame(*frame));
}
}
10.1 VideoReceiver2 video_receiver_;
./modules/video_coding/video_receiver2.cc
int32_t VideoReceiver2::Decode(const VCMEncodedFrame* frame) {
RTC_DCHECK_RUN_ON(&decoder_thread_checker_);
TRACE_EVENT0("webrtc", "VideoReceiver2::Decode");
// Change decoder if payload type has changed
VCMGenericDecoder* decoder =
codecDataBase_.GetDecoder(*frame, &decodedFrameCallback_);
if (decoder == nullptr) {
return VCM_NO_CODEC_REGISTERED;
}
// Decoder lookup done; see step 10.2 for the actual decode
return decoder->Decode(*frame, clock_->TimeInMilliseconds());
}
Now we analyze the codecDataBase_.GetDecoder(*frame, &decodedFrameCallback_) call; the flow is as follows:
./modules/video_coding/decoder_database.cc
VCMGenericDecoder* VCMDecoderDataBase::GetDecoder(
const VCMEncodedFrame& frame,
VCMDecodedFrameCallback* decoded_frame_callback) {
RTC_DCHECK(decoded_frame_callback->UserReceiveCallback());
uint8_t payload_type = frame.PayloadType();
if (payload_type == receive_codec_.plType || payload_type == 0) {
return ptr_decoder_.get();
}
// If decoder exists - delete.
if (ptr_decoder_) {
ptr_decoder_.reset();
memset(&receive_codec_, 0, sizeof(VideoCodec));
}
ptr_decoder_ = CreateAndInitDecoder(frame, &receive_codec_);
if (!ptr_decoder_) {
return nullptr;
}
VCMReceiveCallback* callback = decoded_frame_callback->UserReceiveCallback();
callback->OnIncomingPayloadType(receive_codec_.plType);
if (ptr_decoder_->RegisterDecodeCompleteCallback(decoded_frame_callback) <
0) {
ptr_decoder_.reset();
memset(&receive_codec_, 0, sizeof(VideoCodec));
return nullptr;
}
return ptr_decoder_.get();
}
std::unique_ptr<VCMGenericDecoder> VCMDecoderDataBase::CreateAndInitDecoder(
const VCMEncodedFrame& frame,
VideoCodec* new_codec) const {
uint8_t payload_type = frame.PayloadType();
RTC_LOG(LS_INFO) << "Initializing decoder with payload type '"
<< static_cast<int>(payload_type) << "'.";
RTC_DCHECK(new_codec);
const VCMDecoderMapItem* decoder_item = FindDecoderItem(payload_type);
if (!decoder_item) {
RTC_LOG(LS_ERROR) << "Can't find a decoder associated with payload type: "
<< static_cast<int>(payload_type);
return nullptr;
}
std::unique_ptr<VCMGenericDecoder> ptr_decoder;
const VCMExtDecoderMapItem* external_dec_item =
FindExternalDecoderItem(payload_type);
if (external_dec_item) {
// External codec. ---> this is the object that gets allocated here
ptr_decoder.reset(new VCMGenericDecoder(
external_dec_item->external_decoder_instance, true));
} else {
RTC_LOG(LS_ERROR) << "No decoder of this type exists.";
}
if (!ptr_decoder)
return nullptr;
// Copy over input resolutions to prevent codec reinitialization due to
// the first frame being of a different resolution than the database values.
// This is best effort, since there's no guarantee that width/height have been
// parsed yet (and may be zero).
if (frame.EncodedImage()._encodedWidth > 0 &&
frame.EncodedImage()._encodedHeight > 0) {
decoder_item->settings->width = frame.EncodedImage()._encodedWidth;
decoder_item->settings->height = frame.EncodedImage()._encodedHeight;
}
if (ptr_decoder->InitDecode(decoder_item->settings.get(),
decoder_item->number_of_cores) < 0) {
return nullptr;
}
memcpy(new_codec, decoder_item->settings.get(), sizeof(VideoCodec));
return ptr_decoder;
}
Now we look at where codecDataBase_ gets its decoders. From this function we can see they are the entries of VideoReceiveStream's video_decoders_
./video/video_receive_stream.cc
void VideoReceiveStream::Start()
// Usually three codecs are registered here: VP8, VP9 and H264 (AVC)
for (const Decoder& decoder : config_.decoders) {
// See the LegacyCreateVideoDecoder analysis below
std::unique_ptr<VideoDecoder> video_decoder =
decoder.decoder_factory->LegacyCreateVideoDecoder(decoder.video_format,
config_.stream_id);
if (!decoded_output_file.empty()) {
char filename_buffer[256];
rtc::SimpleStringBuilder ssb(filename_buffer);
ssb << decoded_output_file << "/webrtc_receive_stream_"
<< this->config_.rtp.remote_ssrc << "-" << rtc::TimeMicros()
<< ".ivf";
video_decoder = CreateFrameDumpingDecoderWrapper(std::move(video_decoder), FileWrapper::OpenWriteOnly(ssb.str()));
}
video_decoders_.push_back(std::move(video_decoder));
// video_receiver_ is the VideoReceiver2 object
video_receiver_.RegisterExternalDecoder(video_decoders_.back().get(),
decoder.payload_type);
}
// Now look at the RegisterExternalDecoder function
./modules/video_coding/video_receiver2.cc
void VideoReceiver2::RegisterExternalDecoder(VideoDecoder* externalDecoder,
uint8_t payloadType) {
RTC_DCHECK_RUN_ON(&construction_thread_checker_);
RTC_DCHECK(!IsDecoderThreadRunning());
if (externalDecoder == nullptr) {
RTC_CHECK(codecDataBase_.DeregisterExternalDecoder(payloadType));
return;
}
codecDataBase_.RegisterExternalDecoder(externalDecoder, payloadType);
}
./modules/video_coding/decoder_database.cc
void VCMDecoderDataBase::RegisterExternalDecoder(VideoDecoder* external_decoder,
uint8_t payload_type) {
// If payload value already exists, erase old and insert new.
VCMExtDecoderMapItem* ext_decoder =
new VCMExtDecoderMapItem(external_decoder, payload_type);
DeregisterExternalDecoder(payload_type);
dec_external_map_[payload_type] = ext_decoder;
}
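The registration and lookup above reduce to a map from RTP payload type to decoder, with the decoder being re-selected whenever the payload type of the incoming frame changes. Below is a minimal standalone sketch of this pattern (simplified types; the real VCMDecoderDataBase additionally wraps the decoder in VCMGenericDecoder and calls InitDecode on the switch):
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct VideoDecoder { std::string name; };               // e.g. the wrapped AndroidVideoDecoder

class MiniDecoderDataBase {
 public:
  void RegisterExternalDecoder(uint8_t payload_type, VideoDecoder* decoder) {
    external_decoders_[payload_type] = decoder;          // dec_external_map_[payload_type] = ...
  }
  VideoDecoder* GetDecoder(uint8_t payload_type) {
    if (payload_type == current_payload_type_) return current_;  // still the right decoder
    auto it = external_decoders_.find(payload_type);
    if (it == external_decoders_.end()) return nullptr;  // "No decoder of this type exists."
    current_ = it->second;                               // real code: wrap in VCMGenericDecoder
    current_payload_type_ = payload_type;                // ...and call InitDecode here
    return current_;
  }
 private:
  std::map<uint8_t, VideoDecoder*> external_decoders_;
  VideoDecoder* current_ = nullptr;
  int current_payload_type_ = -1;
};

int main() {
  VideoDecoder vp8{"vp8"}, h264{"h264"};
  MiniDecoderDataBase db;
  db.RegisterExternalDecoder(96, &vp8);                  // VideoReceiver2::RegisterExternalDecoder
  db.RegisterExternalDecoder(102, &h264);
  std::cout << db.GetDecoder(102)->name << "\n";         // switches to the H264 decoder
  return 0;
}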
// LegacyCreateVideoDecoder analysis
Next we analyze how LegacyCreateVideoDecoder creates the decoder: first the decoder factory, then the CreateVideoDecoder function
./api/video_codecs/video_decoder_factory.cc
std::unique_ptr<VideoDecoder> VideoDecoderFactory::LegacyCreateVideoDecoder(
const SdpVideoFormat& format,
const std::string& receive_stream_id) {
return CreateVideoDecoder(format);
}
//------------------------------------------------
// Creation of the decoder factory
//------------------------------------------------
./sdk/android/src/jni/pc/peer_connection_factory.cc
ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
JNIEnv* jni,
const JavaParamRef<jobject>& jcontext,
const JavaParamRef<jobject>& joptions,
rtc::scoped_refptr<AudioDeviceModule> audio_device_module,
rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
const JavaParamRef<jobject>& jencoder_factory,
const JavaParamRef<jobject>& jdecoder_factory,
rtc::scoped_refptr<AudioProcessing> audio_processor,
std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
std::unique_ptr<NetworkControllerFactoryInterface>
network_controller_factory,
std::unique_ptr<NetworkStatePredictorFactoryInterface>
network_state_predictor_factory,
std::unique_ptr<MediaTransportFactory> media_transport_factory,
std::unique_ptr<NetEqFactory> neteq_factory)
media_dependencies.video_decoder_factory =
absl::WrapUnique(CreateVideoDecoderFactory(jni, jdecoder_factory));
./sdk/android/src/jni/pc/video.cc
VideoDecoderFactory* CreateVideoDecoderFactory(
JNIEnv* jni,
const JavaRef<jobject>& j_decoder_factory) {
return IsNull(jni, j_decoder_factory)
? nullptr
: new VideoDecoderFactoryWrapper(jni, j_decoder_factory);
}
// The factory has been created; it is a VideoDecoderFactoryWrapper object
// The CreateVideoDecoder function is as follows:
./sdk/android/src/jni/video_decoder_factory_wrapper.cc
std::unique_ptr<VideoDecoder> VideoDecoderFactoryWrapper::CreateVideoDecoder(
const SdpVideoFormat& format) {
JNIEnv* jni = AttachCurrentThreadIfNeeded();
ScopedJavaLocalRef<jobject> j_codec_info =
SdpVideoFormatToVideoCodecInfo(jni, format);
// See step 1
ScopedJavaLocalRef<jobject> decoder = Java_VideoDecoderFactory_createDecoder(
jni, decoder_factory_, j_codec_info);
if (!decoder.obj())
return nullptr;
// The JNI-level wrapper around the Java-level video decoder object; see step 2
return JavaToNativeVideoDecoder(jni, decoder);
}
1.
// This calls into the Java layer to create the (hardware) decoder
static base::android::ScopedJavaLocalRef<jobject>
Java_VideoDecoderFactory_createDecoder(JNIEnv*
env, const base::android::JavaRef<jobject>& obj, const base::android::JavaRef<jobject>& info) {
jclass clazz = org_webrtc_VideoDecoderFactory_clazz(env);
CHECK_CLAZZ(env, obj.obj(),
org_webrtc_VideoDecoderFactory_clazz(env), NULL);
jni_generator::JniJavaCallContextChecked call_context;
call_context.Init(
env,
clazz,
"createDecoder",
"(Lorg/webrtc/VideoCodecInfo;)Lorg/webrtc/VideoDecoder;",
&g_org_webrtc_VideoDecoderFactory_createDecoder);
jobject ret =
env->CallObjectMethod(obj.obj(), call_context.base.method_id, info.obj());
return base::android::ScopedJavaLocalRef<jobject>(env, ret);
}
// Android Java
public VideoDecoder DefaultVideoDecoderFactory::createDecoder(VideoCodecInfo codecType) {
VideoDecoder softwareDecoder = softwareVideoDecoderFactory.createDecoder(codecType);
final VideoDecoder hardwareDecoder = hardwareVideoDecoderFactory.createDecoder(codecType);
if (softwareDecoder == null && platformSoftwareVideoDecoderFactory != null) {
softwareDecoder = platformSoftwareVideoDecoderFactory.createDecoder(codecType);
}
if (hardwareDecoder != null && softwareDecoder != null) {
// Usually this is what gets returned to the native layer
// Both hardware and software supported, wrap it in a software fallback
return new VideoDecoderFallback(
/* fallback= */ softwareDecoder, /* primary= */ hardwareDecoder);
}
return hardwareDecoder != null ? hardwareDecoder : softwareDecoder;
}
// Android Java --- MediaCodecVideoDecoderFactory, the base class of HardwareVideoDecoderFactory
public VideoDecoder MediaCodecVideoDecoderFactory::createDecoder(VideoCodecInfo codecType) {
VideoCodecType type = VideoCodecType.valueOf(codecType.getName());
MediaCodecInfo info = findCodecForType(type);
if (info == null) {
return null;
}
CodecCapabilities capabilities = info.getCapabilitiesForType(type.mimeType());
return new AndroidVideoDecoder(new MediaCodecWrapperFactoryImpl(), info.getName(), type,
MediaCodecUtils.selectColorFormat(MediaCodecUtils.DECODER_COLOR_FORMATS, capabilities),
sharedContext);
}
2.
./sdk/android/src/jni/video_decoder_wrapper.cc
std::unique_ptr<VideoDecoder> JavaToNativeVideoDecoder(
JNIEnv* jni,
const JavaRef<jobject>& j_decoder) {
// Ask the Java layer to create a native decoder handle
const jlong native_decoder =
Java_VideoDecoder_createNativeVideoDecoder(jni, j_decoder);
VideoDecoder* decoder;
if (native_decoder == 0) {
decoder = new VideoDecoderWrapper(jni, j_decoder);
} else {
decoder = reinterpret_cast<VideoDecoder*>(native_decoder);
}
return std::unique_ptr<VideoDecoder>(decoder);
}
Java_VideoDecoder_createNativeVideoDecoder calls into the Java layer:
public long VideoDecoderFallback::createNativeVideoDecoder() {
return nativeCreateDecoder(fallback, primary);
}
JNI_GENERATOR_EXPORT jlong Java_org_webrtc_VideoDecoderFallback_nativeCreateDecoder(
JNIEnv* env,
jclass jcaller,
jobject fallback,
jobject primary) {
return JNI_VideoDecoderFallback_CreateDecoder(env, base::android::JavaParamRef<jobject>(env,
fallback), base::android::JavaParamRef<jobject>(env, primary));
}
./sdk/android/src/jni/video_decoder_fallback.cc
static jlong JNI_VideoDecoderFallback_CreateDecoder(
JNIEnv* jni,
const JavaParamRef<jobject>& j_fallback_decoder,
const JavaParamRef<jobject>& j_primary_decoder)
VideoDecoder* nativeWrapper =
CreateVideoDecoderSoftwareFallbackWrapper(std::move(fallback_decoder), std::move(primary_decoder))
./api/video_codecs/video_decoder_software_fallback_wrapper.cc
std::unique_ptr<VideoDecoder> CreateVideoDecoderSoftwareFallbackWrapper(
std::unique_ptr<VideoDecoder> sw_fallback_decoder,
std::unique_ptr<VideoDecoder> hw_decoder) {
return std::make_unique<VideoDecoderSoftwareFallbackWrapper>(
std::move(sw_fallback_decoder), std::move(hw_decoder));
}
// This VideoDecoderSoftwareFallbackWrapper is the decoder_ held by VCMGenericDecoder
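The wrapper created here implements a simple rule: try the hardware decoder first and switch permanently to the software decoder once the hardware decoder reports it cannot handle the stream. Below is a minimal standalone sketch of that rule only (simplified types; the real VideoDecoderSoftwareFallbackWrapper, whose Decode is shown a bit further below, also handles initialization and more return codes):
#include <iostream>
#include <memory>

enum class DecodeResult { kOk, kFallbackToSoftware, kError };

struct Decoder {
  virtual ~Decoder() = default;
  virtual DecodeResult Decode(int frame) = 0;
};

class FallbackDecoder : public Decoder {
 public:
  FallbackDecoder(std::unique_ptr<Decoder> hw, std::unique_ptr<Decoder> sw)
      : hw_(std::move(hw)), sw_(std::move(sw)) {}
  DecodeResult Decode(int frame) override {
    if (!use_software_) {
      DecodeResult ret = hw_->Decode(frame);
      if (ret != DecodeResult::kFallbackToSoftware) return ret;
      use_software_ = true;                 // InitFallbackDecoder(), then fall through
    }
    return sw_->Decode(frame);
  }
 private:
  std::unique_ptr<Decoder> hw_, sw_;
  bool use_software_ = false;
};

int main() {
  struct Hw : Decoder {
    DecodeResult Decode(int) override { return DecodeResult::kFallbackToSoftware; }
  };
  struct Sw : Decoder {
    DecodeResult Decode(int f) override { std::cout << "sw decoded " << f << "\n"; return DecodeResult::kOk; }
  };
  FallbackDecoder decoder(std::make_unique<Hw>(), std::make_unique<Sw>());
  decoder.Decode(1);   // hw asks for fallback, sw decodes
  decoder.Decode(2);   // stays on sw
  return 0;
}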
10.2
./modules/video_coding/generic_decoder.cc
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp", frame.Timestamp());
_frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
_frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
_frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
_frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
_frameInfos[_nextFrameInfoIdx].ntp_time_ms = frame.EncodedImage().ntp_time_ms_;
_frameInfos[_nextFrameInfoIdx].packet_infos = frame.PacketInfos();
// Set correctly only for key frames. Thus, use latest key frame
// content type. If the corresponding key frame was lost, decode will fail
// and content type will be ignored.
if (frame.FrameType() == VideoFrameType::kVideoFrameKey) {
_frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
_last_keyframe_content_type = frame.contentType();
} else {
_frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
}
_callback->Map(frame.Timestamp(), &_frameInfos[_nextFrameInfoIdx]);
_nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
// From the analysis in 10.1 we know that decoder_ is the VideoDecoderSoftwareFallbackWrapper
int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
frame.RenderTimeMs());
_callback->OnDecoderImplementationName(decoder_->ImplementationName());
if (ret < WEBRTC_VIDEO_CODEC_OK) {
RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
<< frame.Timestamp() << ", error code: " << ret;
_callback->Pop(frame.Timestamp());
return ret;
} else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT) {
// No output
_callback->Pop(frame.Timestamp());
}
return ret;
}
./api/video_codecs/video_decoder_software_fallback_wrapper.cc
int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
const EncodedImage& input_image,
bool missing_frames,
int64_t render_time_ms) {
switch (decoder_type_) {
case DecoderType::kNone:
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
case DecoderType::kHardware: {
int32_t ret = WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
ret = hw_decoder_->Decode(input_image, missing_frames, render_time_ms);
if (ret != WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE) {
return ret;
}
// HW decoder returned WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE or
// initialization failed, fallback to software.
if (!InitFallbackDecoder()) {
return ret;
}
// Fallback decoder initialized, fall-through.
RTC_FALLTHROUGH();
}
case DecoderType::kFallback:
return fallback_decoder_->Decode(input_image, missing_frames, render_time_ms);
default:
RTC_NOTREACHED();
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// The Java-side org.webrtc hardware decode function
public VideoCodecStatus AndroidVideoDecoder::decode(EncodedImage frame, DecodeInfo info)
int index;
try {
index = codec.dequeueInputBuffer(DEQUEUE_INPUT_TIMEOUT_US);
} catch (IllegalStateException e) {
Logging.e(TAG, "dequeueInputBuffer failed", e);
return VideoCodecStatus.ERROR;
}
if (index < 0) {
// Decoder is falling behind. No input buffers available.
// The decoder can't simply drop frames; it might lose a key frame.
Logging.e(TAG, "decode() - no HW buffers available; decoder falling behind");
return VideoCodecStatus.ERROR;
}
ByteBuffer buffer;
try {
buffer = codec.getInputBuffers()[index];
} catch (IllegalStateException e) {
Logging.e(TAG, "getInputBuffers failed", e);
return VideoCodecStatus.ERROR;
}
if (buffer.capacity() < size) {
Logging.e(TAG, "decode() - HW buffer too small");
return VideoCodecStatus.ERROR;
}
buffer.put(frame.buffer);
frameInfos.offer(new FrameInfo(SystemClock.elapsedRealtime(), frame.rotation));
try {
codec.queueInputBuffer(index, 0 /* offset */, size,
TimeUnit.NANOSECONDS.toMicros(frame.captureTimeNs), 0 /* flags */);
} catch (IllegalStateException e) {
Logging.e(TAG, "queueInputBuffer failed", e);
frameInfos.pollLast();
return VideoCodecStatus.ERROR;
}
if (keyFrameRequired) {
keyFrameRequired = false;
}
return VideoCodecStatus.OK;
//-----------------------------------------------------------------------
// At this point, the analysis of the path from the jitterbuffer into the decoder is essentially complete!
//-----------------------------------------------------------------------