I usually structure these write-ups along two lines:
1. Which object's method gets called next
2. How the object in the current step hands off to the object in the next step
This article mainly follows the second approach: tracing how each step connects to the next.
What each step does internally cannot be shown in full detail here, otherwise the article would become
enormous, so please dig into the parts that matter to you on your own.
1.
CallActivity::OnCreate
appRtcClient = new WebSocketRTCClient(this);
peerConnectionClient.createPeerConnectionFactory(options);
2. Initiate the audio/video call
startScreenCapture/startCall
CallActivity.java
CallActivity::startCall
appRtcClient.connectToRoom(roomConnectionParameters); --->
3. WebSocketRTCClient.java
WebSocketRTCClient::connectToRoom
connectToRoomInternal(); --->
4. WebSocketRTCClient.java
WebSocketRTCClient::connectToRoomInternal
wsClient = new WebSocketChannelClient(handler, this);
... ...
WebSocketRTCClient.this.signalingParametersReady(params); --->
5. CallActivity.java
CallActivity::onConnectedToRoom
6. CallActivity.java
CallActivity::onConnectedToRoomInternal
// See flow 6.1
videoCapturer = createVideoCapturer();
// See flow 7
peerConnectionClient.createPeerConnection(localProxyVideoSink, remoteSinks, videoCapturer, signalingParameters);
// See flow 8
peerConnectionClient.createOffer()
//------------------------------------------
// 6.1 Video capture device flow analysis
//------------------------------------------
6.1.1 CallActivity.java
CallActivity::createScreenCapturer
return new ScreenCapturerAndroid(
mediaProjectionPermissionResultData, new MediaProjection.Callback() {
@Override
public void onStop() {
reportError("User revoked permission to capture the screen.");
}
});
7.
PeerConnectionClient::createPeerConnection
// See flow 7.1; this just sets up the audio/video constraint parameters
createMediaConstraintsInternal();
// See flow 7.2
createPeerConnectionInternal();
maybeCreateAndStartRtcEventLog();
7.1
createMediaConstraintsInternal()
7.2 Implementation
PeerConnectionClient::createPeerConnectionInternal
// See flow 7.2.1 below
peerConnection = factory.createPeerConnection(rtcConfig, pcObserver);
// See flow 7.2.2 below
peerConnection.addTrack(createVideoTrack(videoCapturer), mediaStreamLabels);
remoteVideoTrack = getRemoteVideoTrack();
remoteVideoTrack.setEnabled(renderVideo);
for (VideoSink remoteSink : remoteSinks) {
remoteVideoTrack.addSink(remoteSink);
}
7.2.1 Create and initialize the PeerConnection object
7.2.1.1
PeerConnectionFactory::createPeerConnection
createPeerConnectionInternal
7.2.1.2
PeerConnectionFactory::createPeerConnectionInternal
// This creates the native observer that wraps the Java-layer observer
long nativeObserver = PeerConnection.createNativePeerConnectionObserver(observer);
1. Java
long nativePeerConnection = nativeCreatePeerConnection(nativeFactory, rtcConfig,
constraints, nativeObserver, sslCertificateVerifier);
2. C++
Java_org_webrtc_PeerConnectionFactory_nativeCreatePeerConnection
3. C++
./sdk/android/src/jni/pc/peer_connection_factory.cc
JNI_PeerConnectionFactory_CreatePeerConnection
rtc::scoped_refptr<PeerConnectionInterface> pc =
PeerConnectionFactoryFromJava(factory)->CreatePeerConnection(rtc_config,
std::move(peer_connection_dependencies));
//------------------------------------------------------------------------
4. Everything initialized here is very important
//------------------------------------------------------------------------
./pc/peer_connection_factory.cc
PeerConnectionFactory::CreatePeerConnection
dependencies.cert_generator = std::make_unique<rtc::RTCCertificateGenerator>(signaling_thread_, network_thread_);
// This is the socket factory!
packet_socket_factory = default_socket_factory_.get();
// This is the port allocator!
dependencies.allocator = std::make_unique<cricket::BasicPortAllocator>
(default_network_manager_.get(),
packet_socket_factory, configuration.turn_customizer);
dependencies.ice_transport_factory = std::make_unique<DefaultIceTransportFactory>();
// network_thread_ is very important; see this post:
https://blog.csdn.net/freeabc/article/details/106142951
network_thread_->Invoke<void>(RTC_FROM_HERE,
rtc::Bind(&cricket::PortAllocator::SetNetworkIgnoreMask,
dependencies.allocator.get(), options_.network_ignore_mask));
std::unique_ptr<RtcEventLog> event_log = worker_thread_->Invoke<std::unique_ptr<RtcEventLog>>(RTC_FROM_HERE,
rtc::Bind(&PeerConnectionFactory::CreateRtcEventLog_w, this));
std::unique_ptr<Call> call = worker_thread_->Invoke<std::unique_ptr<Call>>(RTC_FROM_HERE,
rtc::Bind(&PeerConnectionFactory::CreateCall_w, this, event_log.get()));
./call/call_factory.cc
std::unique_ptr<Call>(call_factory_->CreateCall(call_config));
./call/call.cc
Call::Create
new internal::Call(clock, config, std::make_unique<RtpTransportControllerSend>(
clock, config.event_log, config.network_state_predictor_factory,
config.network_controller_factory, config.bitrate_config,
std::move(pacer_thread), config.task_queue_factory, config.trials),
std::move(call_thread), config.task_queue_factory)
internal::Call initializes quite a lot:
task_queue_factory_(task_queue_factory),
num_cpu_cores_(CpuInfo::DetectNumberOfCores()),
module_process_thread_(std::move(module_process_thread)),
call_stats_(new CallStats(clock_, module_process_thread_.get())),
bitrate_allocator_(new BitrateAllocator(this)),
config_(config),
audio_network_state_(kNetworkDown),
video_network_state_(kNetworkDown),
aggregate_network_up_(false),
receive_crit_(RWLockWrapper::CreateRWLock()),
send_crit_(RWLockWrapper::CreateRWLock()),
event_log_(config.event_log),
received_bytes_per_second_counter_(clock_, nullptr, true),
received_audio_bytes_per_second_counter_(clock_, nullptr, true),
received_video_bytes_per_second_counter_(clock_, nullptr, true),
received_rtcp_bytes_per_second_counter_(clock_, nullptr, true),
last_bandwidth_bps_(0),
min_allocated_send_bitrate_bps_(0),
configured_max_padding_bitrate_bps_(0),
estimated_send_bitrate_kbps_counter_(clock_, nullptr, true),
pacer_bitrate_kbps_counter_(clock_, nullptr, true),
receive_side_cc_(clock_, transport_send->packet_router()),
receive_time_calculator_(ReceiveTimeCalculator::CreateFromFieldTrial()),
video_send_delay_stats_(new SendDelayStats(clock_)),
start_ms_(clock_->TimeInMilliseconds()),
transport_send_ptr_(transport_send.get()),
transport_send_(std::move(transport_send)) {
worker_sequence_checker_.Detach();
call_stats_->RegisterStatsObserver(&receive_side_cc_);
module_process_thread_->RegisterModule(receive_side_cc_.GetRemoteBitrateEstimator(true), RTC_FROM_HERE);
module_process_thread_->RegisterModule(call_stats_.get(), RTC_FROM_HERE);
module_process_thread_->RegisterModule(&receive_side_cc_, RTC_FROM_HERE);
// Creation of the PeerConnection object, very important
rtc::scoped_refptr<PeerConnection> pc(new rtc::RefCountedObject<PeerConnection>(this, std::move(event_log), std::move(call)));
// PeerConnection initialization, very important
pc->Initialize(configuration, std::move(dependencies))
./pc/peer_connection.cc
PeerConnection::Initialize
observer_ = dependencies.observer;
async_resolver_factory_ = std::move(dependencies.async_resolver_factory);
port_allocator_ = std::move(dependencies.allocator);
ice_transport_factory_ = std::move(dependencies.ice_transport_factory);
tls_cert_verifier_ = std::move(dependencies.tls_cert_verifier);
const auto pa_result = network_thread()->Invoke<InitializePortAllocatorResult>(RTC_FROM_HERE,
rtc::Bind(&PeerConnection::InitializePortAllocator_n, this, stun_servers, turn_servers, configuration));
config.ice_transport_factory = ice_transport_factory_.get();
// The transport-layer controller
transport_controller_.reset(new JsepTransportController(signaling_thread(),
network_thread(), port_allocator_.get(), async_resolver_factory_.get(), config));
stats_.reset(new StatsCollector(this));
stats_collector_ = RTCStatsCollector::Create(this);
webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory(
signaling_thread(), channel_manager(), this, session_id(),
std::move(dependencies.cert_generator), certificate, &ssrc_generator_));
// Creation of the audio/video transceiver objects, very important; see this post:
https://blog.csdn.net/freeabc/article/details/106142951
transceivers_.push_back(
RtpTransceiverProxyWithInternal<RtpTransceiver>::Create(signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_AUDIO)));
transceivers_.push_back(
RtpTransceiverProxyWithInternal<RtpTransceiver>::Create(signaling_thread(), new RtpTransceiver(cricket::MEDIA_TYPE_VIDEO)));
video_bitrate_allocator_factory_ = CreateBuiltinVideoBitrateAllocatorFactory();
// During initialization we have built transport_controller_ (JsepTransportController), webrtc_session_desc_factory_ (WebRtcSessionDescriptionFactory),
// and the audio/video transceivers transceivers_: RtpTransceiver(MEDIA_TYPE_AUDIO) and RtpTransceiver(MEDIA_TYPE_VIDEO)
// The C++ proxy pointer returned to the Java layer
PeerConnectionProxy::Create(signaling_thread(), pc);
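As a side note, the flow above is exactly what the native C++ API exposes. Here is a minimal sketch of driving PeerConnectionFactoryInterface::CreatePeerConnection directly from C++; the observer class MyObserver is made up for illustration, and the exact set of pure-virtual callbacks varies slightly between WebRTC revisions:

#include "api/peer_connection_interface.h"

// Hypothetical observer; these are the callbacks that
// PeerConnection.createNativePeerConnectionObserver bridges back to Java.
class MyObserver : public webrtc::PeerConnectionObserver {
 public:
  void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {}
  void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override {}
  void OnRenegotiationNeeded() override {}
  void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {}
  void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override {}
  void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) override {
    // Each gathered candidate would be sent to the remote peer over signaling.
  }
};

rtc::scoped_refptr<webrtc::PeerConnectionInterface> CreatePc(
    webrtc::PeerConnectionFactoryInterface* factory, MyObserver* observer) {
  webrtc::PeerConnectionInterface::RTCConfiguration config;
  // This dependencies struct is the same object that ends up in
  // PeerConnectionFactory::CreatePeerConnection above, where the default
  // allocator, cert_generator and ice_transport_factory get filled in.
  webrtc::PeerConnectionDependencies deps(observer);
  return factory->CreatePeerConnection(config, std::move(deps));
}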
7.2.2 Flow for creating the video track
peerConnection.addTrack(createVideoTrack(videoCapturer), mediaStreamLabels);
//-----------------------------------------------------------
// createVideoTrack analysis
//-----------------------------------------------------------
PeerConnectionClient::createVideoTrack
// See flow 7.2.2.1
videoSource = factory.createVideoSource(capturer.isScreencast());
// See flow 7.2.2.2
capturer.initialize(surfaceTextureHelper, appContext, videoSource.getCapturerObserver());
capturer.startCapture(videoWidth, videoHeight, videoFps);
// See flow 7.2.2.3
localVideoTrack = factory.createVideoTrack(VIDEO_TRACK_ID, videoSource);
7.2.2.1 JNI-layer video source creation flow analysis
1.
PeerConnectionFactory::createVideoSource(boolean isScreencast)
return createVideoSource(isScreencast, /* alignTimestamps= */ true);
2.
PeerConnectionFactory::createVideoSource(boolean isScreencast, boolean alignTimestamps)
return new VideoSource(nativeCreateVideoSource(nativeFactory, isScreencast, alignTimestamps));
3.
nativeCreateVideoSource(nativeFactory, isScreencast, alignTimestamps)
4.
Java_org_webrtc_PeerConnectionFactory_nativeCreateVideoSource
5.
./sdk/android/src/jni/pc/peer_connection_factory.cc
JNI_PeerConnectionFactory_CreateVideoSource
return jlongFromPointer(CreateVideoSource(jni, factory->signaling_thread(),
factory->worker_thread(), is_screencast, align_timestamps));
6. The upper-layer Java VideoSource object actually corresponds to AndroidVideoTrackSource at the JNI layer
./jni/pc/video.cc
void* CreateVideoSource(JNIEnv* env, rtc::Thread* signaling_thread, rtc::Thread* worker_thread,
jboolean is_screencast, jboolean align_timestamps)
rtc::scoped_refptr<AndroidVideoTrackSource> source(new rtc::RefCountedObject<AndroidVideoTrackSource>(
signaling_thread, env, is_screencast, align_timestamps));
7.2.2.2 How the video-capture capturerObserver works
capturerObserver
onCapturerStarted
onCapturerStopped
// The video capture and processing flow!
onFrameCaptured
NativeAndroidVideoTrackSource.onFrameCaptured
1. Java
nativeOnFrameCaptured
2. C++
Java_org_webrtc_NativeAndroidVideoTrackSource_nativeOnFrameCaptured
3. Calls into the AndroidVideoTrackSource object created in the previous step
native->OnFrameCaptured(env, rotation, timestampNs, base::android::JavaParamRef<jobject>(env, buffer))
4.
./sdk/android/src/jni/pc/android_video_track_source.cc
AndroidVideoTrackSource::OnFrameCaptured
rtc::scoped_refptr<VideoFrameBuffer> buffer = AndroidVideoBuffer::Create(env, j_video_frame_buffer);
// See flow 4.1
OnFrame(VideoFrame::Builder().set_video_frame_buffer(buffer).set_rotation(rotation)
.set_timestamp_us(j_timestamp_ns / rtc::kNumNanosecsPerMicrosec).build());
4.1 OnFrame calls into the parent class AdaptedVideoTrackSource:
./media/base/adapted_video_track_source.cc
AdaptedVideoTrackSource::OnFrame
broadcaster_.OnFrame(frame)
4.2 It is actually this VideoBroadcaster that delivers the captured video
./media/base/video_broadcaster.cc
VideoBroadcaster::OnFrame
sink_pair.sink->OnFrame(frame);
The sink_pair entries are added via AddOrUpdateSink; for the details see
nativeAddTrack(track.getNativeMediaStreamTrack(), streamIds). A minimal sink sketch follows below.
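The same AddOrUpdateSink entry point is how any consumer (a renderer, or the encoder in flow 4.1.6 later on) taps into this VideoBroadcaster. A minimal sketch of a custom sink; the class name FrameCounter is made up:

#include "api/media_stream_interface.h"
#include "api/video/video_frame.h"
#include "api/video/video_sink_interface.h"
#include "api/video/video_source_interface.h"

// Hypothetical sink: every frame the VideoBroadcaster fans out lands here.
class FrameCounter : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  void OnFrame(const webrtc::VideoFrame& frame) override {
    ++frames_;  // a real renderer would draw |frame| instead of counting it
  }
  int frames() const { return frames_; }

 private:
  int frames_ = 0;
};

// Registering the sink on a track adds it to the underlying VideoBroadcaster,
// which is exactly the sink_pair mechanism shown above.
void Attach(webrtc::VideoTrackInterface* track, FrameCounter* sink) {
  track->AddOrUpdateSink(sink, rtc::VideoSinkWants());
}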
7.2.2.3 Creating the VideoTrack
new VideoTrack(nativeCreateVideoTrack(nativeFactory, id, source.getNativeVideoTrackSource()))
1. Java
nativeCreateVideoTrack
2. C++
Java_org_webrtc_PeerConnectionFactory_nativeCreateVideoTrack
3. C++
./sdk/android/src/jni/pc/peer_connection_factory.cc
JNI_PeerConnectionFactory_CreateVideoTrack
rtc::scoped_refptr<VideoTrackInterface> track =
PeerConnectionFactoryFromJava(native_factory)->CreateVideoTrack(JavaToStdString(jni, id),
reinterpret_cast<VideoTrackSourceInterface*>(native_source));
4.
./pc/peer_connection_factory.cc
PeerConnectionFactory::CreateVideoTrack
rtc::scoped_refptr<VideoTrackInterface> track(VideoTrack::Create(id, source, worker_thread_));
5.
./pc/video_track.cc
VideoTrack::Create
rtc::RefCountedObject<VideoTrack>* track = new rtc::RefCountedObject<VideoTrack>(id, source, worker_thread);
6.
VideoTrack::OnChanged
video_source_->RegisterObserver(this);
//-----------------------------------------------------------
// addTrack flow analysis
//-----------------------------------------------------------
PeerConnection::addTrack
1. Java
RtpSender newSender = nativeAddTrack(track.getNativeMediaStreamTrack(), streamIds);
2. C++
Java_org_webrtc_PeerConnection_nativeAddTrack
3.
./sdk/android/src/jni/pc/peer_connection.cc
JNI_PeerConnection_AddTrack
ExtractNativePC(jni, j_pc)->AddTrack(
reinterpret_cast<MediaStreamTrackInterface*>(native_track),
JavaListToNativeVector<std::string, jstring>(jni,
j_stream_labels, &JavaToNativeString));
4.
./pc/peer_connection.cc
PeerConnection::AddTrack
PeerConnection::AddTrackPlanB
// See flow 4.1
auto new_sender = CreateSender(media_type, track->id(), track, adjusted_stream_ids, {});
// Video channel setup
new_sender->internal()->SetMediaChannel(video_media_channel());
GetVideoTransceiver()->internal()->AddSender(new_sender);
const RtpSenderInfo* sender_info = FindSenderInfo(local_video_sender_infos_,
new_sender->internal()->stream_ids()[0], track->id());
if (sender_info) {
new_sender->internal()->SetSsrc(sender_info->first_ssrc);
}
// Audio channel setup
new_sender->internal()->SetMediaChannel(voice_media_channel());
GetAudioTransceiver()->internal()->AddSender(new_sender);
const RtpSenderInfo* sender_info = FindSenderInfo(local_audio_sender_infos_,
new_sender->internal()->stream_ids()[0], track->id());
if (sender_info) {
new_sender->internal()->SetSsrc(sender_info->first_ssrc);
}
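The Java addTrack call traced above maps one-to-one onto the native API. A minimal sketch of driving it from C++; the track and stream ids here are made up:

// The returned RtpSenderInterface is the proxy around the VideoRtpSender
// created in PeerConnection::CreateSender (flow 4.1 below).
rtc::scoped_refptr<webrtc::RtpSenderInterface> AddVideoTrack(
    webrtc::PeerConnectionFactoryInterface* factory,
    webrtc::PeerConnectionInterface* pc,
    rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source) {
  rtc::scoped_refptr<webrtc::VideoTrackInterface> track =
      factory->CreateVideoTrack("video0", source.get());
  auto result = pc->AddTrack(track, {"stream0"});
  if (!result.ok()) {
    return nullptr;  // e.g. INVALID_PARAMETER if the track was already added
  }
  return result.value();
}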
4.1
PeerConnection::CreateSender
// The video sender object
sender = RtpSenderProxyWithInternal<RtpSenderInternal>::Create(signaling_thread(),
VideoRtpSender::Create(worker_thread(), id, this));
NoteUsageEvent(UsageEvent::VIDEO_ADDED);
// The audio sender object
sender = RtpSenderProxyWithInternal<RtpSenderInternal>::Create(signaling_thread(),
AudioRtpSender::Create(worker_thread(), id, stats_.get(), this));
NoteUsageEvent(UsageEvent::AUDIO_ADDED);
//-----------------------------------------------------------------
// This is where the audio/video capture gets hooked up to the encoder; see flow 4.1.1 below (AddOrUpdateSink)
//-----------------------------------------------------------------
bool set_track_succeeded = sender->SetTrack(track);
sender->internal()->set_stream_ids(stream_ids);
sender->internal()->set_init_send_encodings(send_encodings);
4.1.1
./pc/rtp_sender.cc
RtpSenderBase::SetTrack
SetSend();
4.1.2
./pc/rtp_sender.cc
VideoRtpSender::SetSend()
VideoTrackSourceInterface* source = video_track()->GetSource();
// video_media_channel() is the WebRtcVideoChannel object; see sub-flow 2 inside flow 4.2.2.1 below for how it is created
video_media_channel()->SetVideoSend(ssrc_, &options, video_track());
4.1.3
./media/engine/webrtc_video_engine.cc
WebRtcVideoChannel::SetVideoSend
const auto& kv = send_streams_.find(ssrc);
kv->second->SetVideoSend(options, source);
4.1.4
./media/engine/webrtc_video_engine.cc
WebRtcVideoChannel::WebRtcVideoSendStream::SetVideoSend
stream_->SetSource(this, GetDegradationPreference());
Here stream_ is the VideoSendStream object created by Call::CreateVideoSendStream inside
WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream:
stream_ = call_->CreateVideoSendStream(std::move(config), parameters_.encoder_config.Copy());
4.1.5
./video/video_send_stream.cc
VideoSendStream::SetSource
video_stream_encoder_->SetSource(source, degradation_preference);
4.1.6
// -----------------------------------------------------------------------------------
// Here video capture and encoding are wired into a pipeline: the encoder object is registered
// as a sink on broadcaster_ (VideoBroadcaster), so captured frames reach each encoder via OnFrame
// -----------------------------------------------------------------------------------
./video/video_stream_encoder.cc
VideoStreamEncoder::VideoSourceProxy::SetSource
source->AddOrUpdateSink(video_stream_encoder_, wants);
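The producer side of this pipeline can be sketched the same way: a custom capture source only needs to derive from the AdaptedVideoTrackSource seen in flows 4.1/4.2 of 7.2.2.2 and push frames through OnFrame(); the internal VideoBroadcaster then fans them out to every sink registered via AddOrUpdateSink, including the VideoStreamEncoder registered here. A hedged sketch, where FakeSource is a made-up name and the inherited pure-virtual set can differ slightly between revisions:

#include "absl/types/optional.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame.h"
#include "media/base/adapted_video_track_source.h"
#include "rtc_base/time_utils.h"

// Hypothetical source that pushes black I420 frames into the pipeline.
class FakeSource : public rtc::AdaptedVideoTrackSource {
 public:
  void PushBlackFrame(int width, int height) {
    rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(width, height);
    webrtc::I420Buffer::SetBlack(buffer.get());
    // OnFrame() hands the frame to the internal VideoBroadcaster, which calls
    // OnFrame() on every registered sink (renderers, VideoStreamEncoder, ...).
    OnFrame(webrtc::VideoFrame::Builder()
                .set_video_frame_buffer(buffer)
                .set_timestamp_us(rtc::TimeMicros())
                .build());
  }

  // VideoTrackSourceInterface / MediaSourceInterface boilerplate.
  bool is_screencast() const override { return false; }
  absl::optional<bool> needs_denoising() const override { return false; }
  webrtc::MediaSourceInterface::SourceState state() const override {
    return webrtc::MediaSourceInterface::kLive;
  }
  bool remote() const override { return false; }
};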
7.2.3 Flow for creating the audio track (omitted; analyze it yourself if interested)
peerConnection.addTrack(createAudioTrack(), mediaStreamLabels)
audioSource = factory.createAudioSource(audioConstraints);
localAudioTrack = factory.createAudioTrack(AUDIO_TRACK_ID, audioSource);
7.2.4 Setting up the remote track
remoteVideoTrack = getRemoteVideoTrack();
remoteVideoTrack.setEnabled(renderVideo);
for (VideoSink remoteSink : remoteSinks) {
remoteVideoTrack.addSink(remoteSink);
}
8. createOffer flow analysis
8.1 Java
nativeCreateOffer(observer, constraints);
8.2 C++
Java_org_webrtc_PeerConnection_nativeCreateOffer
8.3 C++
./sdk/android/src/jni/pc/peer_connection.cc
JNI_PeerConnection_CreateOffer
rtc::scoped_refptr<CreateSdpObserverJni> observer(new
rtc::RefCountedObject<CreateSdpObserverJni>(jni, j_observer, std::move(constraints)));
// ExtractNativePC returns the proxy created earlier by PeerConnectionProxy::Create
ExtractNativePC(jni, j_pc)->CreateOffer(observer, options);
./pc/peer_connection.cc
PeerConnection::CreateOffer
this_weak_ptr->DoCreateOffer(options, observer_wrapper);
./pc/peer_connection.cc
PeerConnection::DoCreateOffer
// For the construction of webrtc_session_desc_factory_ see PeerConnection::Initialize:
// webrtc_session_desc_factory_.reset(new WebRtcSessionDescriptionFactory...
webrtc_session_desc_factory_->CreateOffer(observer, options, session_options);
8.4
./pc/webrtc_session_description_factory.cc
WebRtcSessionDescriptionFactory::CreateOffer
InternalCreateOffer(request);
./pc/webrtc_session_description_factory.cc
WebRtcSessionDescriptionFactory::InternalCreateOffer
std::unique_ptr<cricket::SessionDescription> desc = session_desc_factory_.CreateOffer(request.options,
pc_->local_description() ? pc_->local_description()->description() : nullptr);
./pc/media_session.cc
MediaSessionDescriptionFactory::CreateOffer
GetActiveContents
GetCurrentStreamParams
GetCodecsForOffer
FilterDataCodecs
GetRtpHdrExtsToOffer
for :
switch :
AddAudioContentForOffer
AddVideoContentForOffer
AddDataContentForOffer
UpdateCryptoParamsForBundle
WebRtcSessionDescriptionFactory::InternalCreateOffer
If creation succeeds, it calls:
1. auto offer = std::make_unique<JsepSessionDescription>(SdpType::kOffer, std::move(desc), session_id_,
rtc::ToString(session_version_++));
2. CopyCandidatesFromSessionDescription
3.
./pc/webrtc_session_description_factory.cc
PostCreateSessionDescriptionSucceeded(request.observer, std::move(offer));
4.
./pc/webrtc_session_description_factory.cc
WebRtcSessionDescriptionFactory::OnMessage(rtc::Message* msg)
1. On failure:
param->observer->OnFailure(std::move(param->error))
2. On success, the created SDP is delivered to the Java layer through the observer; see the flow below:
param->observer->OnSuccess(param->description.release());
1. Java
SDPObserver::onCreateSuccess
2. Java
peerConnection.setLocalDescription
nativeSetLocalDescription
3. C++
Java_org_webrtc_PeerConnection_nativeSetLocalDescription
JNI_PeerConnection_SetLocalDescription
4. C++
./sdk/android/src/jni/pc/peer_connection.cc
JNI_PeerConnection_SetLocalDescription
// After the SDP has been set successfully, flow 4.1 is invoked
rtc::scoped_refptr<SetSdpObserverJni> observer(
new rtc::RefCountedObject<SetSdpObserverJni>(jni, j_observer, nullptr));
// See flow 4.2
ExtractNativePC(jni, j_pc)->SetLocalDescription(observer,
JavaToNativeSessionDescription(jni, j_sdp).release());
./pc/peer_connection.cc
void PeerConnection::SetLocalDescription(
SetSessionDescriptionObserver* observer,
SessionDescriptionInterface* desc_ptr)
this_weak_ptr->DoSetLocalDescription(std::move(desc), std::move(observer_refptr));
./pc/peer_connection.cc
void PeerConnection::DoSetLocalDescription(
std::unique_ptr<SessionDescriptionInterface> desc,
rtc::scoped_refptr<SetSessionDescriptionObserver> observer)
PostSetSessionDescriptionSuccess(observer);
//-------------------------------------------------------------------
4.1 PostSetSessionDescriptionSuccess then triggers this callback
//-------------------------------------------------------------------
4.1.1
void SetSdpObserverJni::OnSuccess()
Java_SdpObserver_onSetSuccess(env, j_observer_global_);
4.1.2
PeerConnectionClient::SDPObserver::onSetSuccess
4.1.3 onSetSuccess then calls back into the Java-layer events interface
events.onLocalDescription(localSdp)
//************************************************************************
//
4.1.4 This is where the SDP gets sent to the remote peer!
//
//************************************************************************
CallActivity::onLocalDescription
appRtcClient.sendOfferSdp(sdp);
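For reference, the whole createOffer → onCreateSuccess → setLocalDescription → sendOfferSdp round trip that the Java SDPObserver implements can be sketched directly against the native API. The observer class names below are made up and error handling is omitted:

#include <string>
#include "api/peer_connection_interface.h"
#include "rtc_base/ref_counted_object.h"

// Hypothetical pair of observers mirroring PeerConnectionClient.SDPObserver.
class OfferObserver : public webrtc::CreateSessionDescriptionObserver {
 public:
  explicit OfferObserver(webrtc::PeerConnectionInterface* pc) : pc_(pc) {}

  void OnSuccess(webrtc::SessionDescriptionInterface* desc) override {
    // Counterpart of SDPObserver::onCreateSuccess.
    std::string sdp;
    desc->ToString(&sdp);
    // |sdp| is what flow 4.1.4 hands to appRtcClient.sendOfferSdp(sdp).
    pc_->SetLocalDescription(new rtc::RefCountedObject<SetObserver>(), desc);
  }
  void OnFailure(webrtc::RTCError error) override {}

 private:
  // Counterpart of SDPObserver::onSetSuccess (flow 4.1).
  class SetObserver : public webrtc::SetSessionDescriptionObserver {
   public:
    void OnSuccess() override {}
    void OnFailure(webrtc::RTCError error) override {}
  };
  webrtc::PeerConnectionInterface* pc_;
};

void StartOffer(webrtc::PeerConnectionInterface* pc) {
  pc->CreateOffer(new rtc::RefCountedObject<OfferObserver>(pc),
                  webrtc::PeerConnectionInterface::RTCOfferAnswerOptions());
}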
//------------------------------------------------------------------
4.2 The candidate-gathering (probing) flow is as follows
//------------------------------------------------------------------
4.2.1
./pc/peer_connection.cc
PeerConnection::SetLocalDescription
4.2.2
PeerConnection::DoSetLocalDescription
// See flow 4.2.2.1
error = ApplyLocalDescription(std::move(desc));
// See flow 4.2.2.2
PostSetSessionDescriptionSuccess(observer);
// See flow 4.2.2.3
transport_controller_->MaybeStartGathering();
4.2.2.1
PeerConnection::ApplyLocalDescription
// WebRTC has built-in simulcast: one input stream can be encoded at several resolutions and sent in parallel,
// and the receiving side can dynamically pick a resolution based on bandwidth, or choose one explicitly
ReportSimulcastApiVersion(kSimulcastVersionApplyLocalDescription,
*local_description()->description());
// See flow 1 below
RTCError error = PushdownTransportDescription(cricket::CS_LOCAL, type);
// See flow 2 below
RTCError error = CreateChannels(*local_description()->description());
// See flow 3 below
error = UpdateSessionState(type, cricket::CS_LOCAL, local_description()->description());
1. This mainly creates the transport objects: ice (P2PTransportChannel*), dtls_srtp_transport and JsepTransport, and wires up the relations between them
PeerConnection::PushdownTransportDescription
// For the local description (cricket::CS_LOCAL) this runs:
transport_controller_->SetLocalDescription
// for the remote description it runs the following instead; we follow the local (offer) path here
transport_controller_->SetRemoteDescription
1.1
./pc/jsep_transport_controller.cc
JsepTransportController::SetLocalDescription
return ApplyDescription_n(/*local=*/true, type, description);
1.2
RTCError JsepTransportController::ApplyDescription_n(
bool local, SdpType type, const cricket::SessionDescription* description)
// This loop creates one JsepTransport for each content section (m-line) in the SDP
for (const cricket::ContentInfo& content_info : description->contents()) {
// Don't create transports for rejected m-lines and bundled m-lines.
if (content_info.rejected || (IsBundled(content_info.name) && content_info.name != *bundled_mid())) {
continue;
}
// See the analysis in 1.2.1
error = MaybeCreateJsepTransport(local, content_info, *description);
if (!error.ok()) {
return error;
}
}
cricket::JsepTransportDescription jsep_description = CreateJsepTransportDescription(content_info, transport_info,
extension_ids, rtp_abs_sendtime_extn_id, media_alt_protocol, data_alt_protocol);
if (local) {
error = transport->SetLocalJsepTransportDescription(jsep_description, type);
} else {
error = transport->SetRemoteJsepTransportDescription(jsep_description, type);
}
1.2.1 The MaybeCreateJsepTransport flow
RTCError JsepTransportController::MaybeCreateJsepTransport(
bool local, const cricket::ContentInfo& content_info,
const cricket::SessionDescription& description)
// Important: the ice object produced here is really a P2PTransportChannel; see flow 1 below
rtc::scoped_refptr<webrtc::IceTransportInterface> ice = CreateIceTransport(content_info.name, /*rtcp=*/false);
// Important: our media is sent as encrypted RTP (SRTP over DTLS); see flow 2 below
std::unique_ptr<cricket::DtlsTransportInternal> rtp_dtls_transport = CreateDtlsTransport(content_info, ice->internal(), nullptr);
dtls_srtp_transport = CreateDtlsSrtpTransport(content_info.name, rtp_dtls_transport.get(), rtcp_dtls_transport.get());
std::unique_ptr<cricket::JsepTransport> jsep_transport =
std::make_unique<cricket::JsepTransport>(
content_info.name, certificate_, std::move(ice), std::move(rtcp_ice),
std::move(unencrypted_rtp_transport), std::move(sdes_transport),
std::move(dtls_srtp_transport), std::move(datagram_rtp_transport),
std::move(rtp_dtls_transport), std::move(rtcp_dtls_transport),
std::move(sctp_transport), std::move(datagram_transport),
data_channel_transport);
// The jsep_transport later looked up by name is the one created here
jsep_transports_by_name_[content_info.name] = std::move(jsep_transport);
UpdateAggregateStates_n();
1. CreateIceTransport flow analysis
rtc::scoped_refptr<webrtc::IceTransportInterface>
JsepTransportController::CreateIceTransport(const std::string& transport_name, bool rtcp)
// ice_transport_factory is the DefaultIceTransportFactory created earlier via std::make_unique<DefaultIceTransportFactory>()
return config_.ice_transport_factory->CreateIceTransport(transport_name, component, std::move(init));
//*******************************************************************************************
See: the ice_transport used above is really a P2PTransportChannel object
//*******************************************************************************************
./p2p/base/default_ice_transport_factory.cc
DefaultIceTransportFactory::CreateIceTransport
return new rtc::RefCountedObject<DefaultIceTransport>(
std::make_unique<cricket::P2PTransportChannel>(
transport_name, component, init.port_allocator(),
init.async_resolver_factory(), init.event_log(), &factory));
2. CreateDtlsTransport flow analysis
std::unique_ptr<cricket::DtlsTransportInternal> JsepTransportController::CreateDtlsTransport(
const cricket::ContentInfo& content_info,
cricket::IceTransportInternal* ice,
DatagramTransportInterface* datagram_transport)
dtls = std::make_unique<cricket::DtlsTransport>(ice, config_.crypto_options, config_.event_log);
./p2p/base/dtls_transport.cc
DtlsTransport::DtlsTransport(IceTransportInternal* ice_transport,
const webrtc::CryptoOptions& crypto_options, webrtc::RtcEventLog* event_log)
ConnectToIceTransport()
// This flow is covered in detail in this post: https://blog.csdn.net/freeabc/article/details/106142951
void DtlsTransport::ConnectToIceTransport() {
RTC_DCHECK(ice_transport_);
ice_transport_->SignalWritableState.connect(this, &DtlsTransport::OnWritableState);
ice_transport_->SignalReadPacket.connect(this, &DtlsTransport::OnReadPacket);
ice_transport_->SignalSentPacket.connect(this, &DtlsTransport::OnSentPacket);
ice_transport_->SignalReadyToSend.connect(this, &DtlsTransport::OnReadyToSend);
ice_transport_->SignalReceivingState.connect(this, &DtlsTransport::OnReceivingState);
ice_transport_->SignalNetworkRouteChanged.connect(this, &DtlsTransport::OnNetworkRouteChanged);
}
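The SignalXxx.connect() calls above use the sigslot library bundled with WebRTC. This small standalone sketch (Emitter and Listener are made-up names) shows the mechanism that wires DtlsTransport to its IceTransportInternal:

#include <string>
#include "rtc_base/third_party/sigslot/sigslot.h"

// Hypothetical emitter: fires a signal, like IceTransportInternal::SignalReadPacket.
struct Emitter {
  sigslot::signal1<const std::string&> SignalData;
};

// Hypothetical listener: inherits sigslot::has_slots<> so connections are
// cleaned up automatically on destruction (DtlsTransport does the same).
struct Listener : public sigslot::has_slots<> {
  void OnData(const std::string& data) { last = data; }
  std::string last;
};

void Demo() {
  Emitter emitter;
  Listener listener;
  emitter.SignalData.connect(&listener, &Listener::OnData);  // like ConnectToIceTransport()
  emitter.SignalData("packet");  // synchronously invokes Listener::OnData
}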
2. We focus on video here; this flow mainly creates a WebRtcVideoChannel object
PeerConnection::CreateChannels
cricket::VoiceChannel* voice_channel = CreateVoiceChannel(voice->name);
GetAudioTransceiver()->internal()->SetChannel(voice_channel);
cricket::VideoChannel* video_channel = CreateVideoChannel(video->name);
GetVideoTransceiver()->internal()->SetChannel(video_channel);
// Definition of the CreateVideoChannel function
cricket::VideoChannel* PeerConnection::CreateVideoChannel(const std::string& mid) {
RtpTransportInternal* rtp_transport = GetRtpTransport(mid);
MediaTransportConfig media_transport_config = transport_controller_->GetMediaTransportConfig(mid);
cricket::VideoChannel* video_channel = channel_manager()->CreateVideoChannel(
call_ptr_, configuration_.media_config, rtp_transport,
media_transport_config, signaling_thread(), mid, SrtpRequired(),
GetCryptoOptions(), &ssrc_generator_, video_options_,
video_bitrate_allocator_factory_.get());
if (!video_channel) {
return nullptr;
}
video_channel->SignalDtlsSrtpSetupFailure.connect(this, &PeerConnection::OnDtlsSrtpSetupFailure);
video_channel->SignalSentPacket.connect(this, &PeerConnection::OnSentPacket_w);
video_channel->SetRtpTransport(rtp_transport);
return video_channel;
}
//
./pc/channel_manager.cc
VideoChannel* ChannelManager::CreateVideoChannel(
webrtc::Call* call,
const cricket::MediaConfig& media_config,
webrtc::RtpTransportInternal* rtp_transport,
const webrtc::MediaTransportConfig& media_transport_config,
rtc::Thread* signaling_thread,
const std::string& content_name,
bool srtp_required,
const webrtc::CryptoOptions& crypto_options,
rtc::UniqueRandomIdGenerator* ssrc_generator,
const VideoOptions& options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory)
//---------------------------------------------------------------------------
// This media_engine is the CompositeMediaEngine set up in createPeerConnectionFactory:
// video is WebRtcVideoEngine, audio is WebRtcVoiceEngine; the video encoder factory is
// VideoEncoderFactoryWrapper and the decoder factory is VideoDecoderFactoryWrapper.
// The encoder comes from the encoder factory:
// VideoEncoderFactoryWrapper::CreateVideoEncoder calls
// Java_VideoEncoderFactory_createEncoder, and that is where the encoder gets created.
// VideoStreamEncoder::ReconfigureEncoder -->
//   if (encoder_->InitEncode(&send_codec_, VideoEncoder::Settings(
//       settings_.capabilities, number_of_cores_,
//       max_data_payload_length)) != 0)
// VideoEncoderWrapper::InitEncodeInternal -->
//   Java_VideoEncoder_initEncode
// The callback passed there is in fact the native
// VideoEncoderWrapper::OnEncodedFrame.
// ./video/video_stream_encoder.cc
//   encoder_->RegisterEncodeCompleteCallback(this);
// and the encoded data is eventually sent to ...
//---------------------------------------------------------------------------
VideoMediaChannel* media_channel = media_engine_->video().CreateMediaChannel(
call, media_config, options, crypto_options,
video_bitrate_allocator_factory);
auto video_channel = std::make_unique<VideoChannel>(
worker_thread_, network_thread_, signaling_thread,
absl::WrapUnique(media_channel), content_name, srtp_required,
crypto_options, ssrc_generator);
video_channel->Init_w(rtp_transport, media_transport_config);
VideoChannel* video_channel_ptr = video_channel.get();
video_channels_.push_back(std::move(video_channel));
return video_channel_ptr;
./media/engine/webrtc_video_engine.cc
VideoMediaChannel* WebRtcVideoEngine::CreateMediaChannel(
webrtc::Call* call,
const MediaConfig& config,
const VideoOptions& options,
const webrtc::CryptoOptions& crypto_options,
webrtc::VideoBitrateAllocatorFactory* video_bitrate_allocator_factory) {
RTC_LOG(LS_INFO) << "CreateMediaChannel. Options: " << options.ToString();
//-------------------------------------------------------------------------------------
// The video encoder/decoder factories are passed through here.
// settings_.encoder_factory used below is the encoder_factory_ seen in WebRtcVideoChannel::AddSendStream:
// VideoStreamEncoder::ReconfigureEncoder()
// encoder_ = settings_.encoder_factory->CreateVideoEncoder(encoder_config_.video_format);
// Looking at encoder_factory_ inside WebRtcVideoChannel:
// WebRtcVideoChannel::AddSendStream
// config.encoder_settings.encoder_factory = encoder_factory_;
// and this encoder_factory_ is passed in through the WebRtcVideoChannel constructor (WebRtcVideoChannel::WebRtcVideoChannel)
//-------------------------------------------------------------------------------------
return new WebRtcVideoChannel(call, config, options, crypto_options,
encoder_factory_.get(), decoder_factory_.get(), video_bitrate_allocator_factory);
}
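On Android the encoder factory passed in here is the VideoEncoderFactoryWrapper around the Java factory, but the C++ contract it fulfils is just webrtc::VideoEncoderFactory. A hedged sketch of a minimal software-only factory, assuming the built-in VP8 encoder (QueryVideoEncoder gained a default implementation in later revisions):

#include <memory>
#include <vector>
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"

// Hypothetical factory: only advertises and creates VP8.
class Vp8OnlyEncoderFactory : public webrtc::VideoEncoderFactory {
 public:
  std::vector<webrtc::SdpVideoFormat> GetSupportedFormats() const override {
    return {webrtc::SdpVideoFormat("VP8")};
  }
  CodecInfo QueryVideoEncoder(
      const webrtc::SdpVideoFormat& format) const override {
    CodecInfo info;
    info.is_hardware_accelerated = false;
    info.has_internal_source = false;
    return info;
  }
  std::unique_ptr<webrtc::VideoEncoder> CreateVideoEncoder(
      const webrtc::SdpVideoFormat& format) override {
    // This is the call that VideoStreamEncoder::ReconfigureEncoder ends up
    // making through settings_.encoder_factory (see the comment block above).
    return webrtc::VP8Encoder::Create();
  }
};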
3.
PeerConnection::UpdateSessionState
RTCError error = PushdownMediaDescription(type, source);
PeerConnection::PushdownMediaDescription
for (const auto& transceiver : transceivers_) {
const ContentInfo* content_info =
FindMediaSectionForTransceiver(transceiver, sdesc);
cricket::ChannelInterface* channel = transceiver->internal()->channel();
if (!channel || !content_info || content_info->rejected) {
continue;
}
const MediaContentDescription* content_desc = content_info->media_description();
if (!content_desc) {
continue;
}
std::string error;
bool success = (source == cricket::CS_LOCAL)
? channel->SetLocalContent(content_desc, type, &error)
: channel->SetRemoteContent(content_desc, type, &error);
if (!success) {
LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_PARAMETER, error);
}
}
./pc/channel.cc
VideoChannel::SetLocalContent(const MediaContentDescription* content,
SdpType type, std::string* error_desc)
return InvokeOnWorker(
RTC_FROM_HERE, Bind(&BaseChannel::SetLocalContent_w, this, content, type, error_desc));
bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content,
SdpType type, std::string* error_desc)
if (!UpdateLocalStreams_w(video->streams(), type, error_desc)) {
SafeSetError("Failed to set local video description streams.", error_desc);
return false;
}
bool BaseChannel::UpdateLocalStreams_w(const std::vector<StreamParams>& streams,
SdpType type, std::string* error_desc)
if (media_channel()->AddSendStream(new_stream)) {
RTC_LOG(LS_INFO) << "Add send stream ssrc: " << new_stream.ssrcs[0];
}
./media/engine/webrtc_video_engine.cc
bool WebRtcVideoChannel::AddSendStream(const StreamParams& sp)
WebRtcVideoSendStream* stream = new WebRtcVideoSendStream(
call_, sp, std::move(config), default_send_options_,
video_config_.enable_cpu_adaptation, bitrate_config_.max_bitrate_bps,
send_codec_, send_rtp_extensions_, send_params_);
./media/engine/webrtc_video_engine.cc
WebRtcVideoChannel::WebRtcVideoSendStream::WebRtcVideoSendStream
SetCodec(*codec_settings);
void WebRtcVideoChannel::WebRtcVideoSendStream::SetCodec(const VideoCodecSettings&
codec_settings)
parameters_.encoder_config = CreateVideoEncoderConfig(codec_settings.codec);
RecreateWebRtcStream();
void WebRtcVideoChannel::WebRtcVideoSendStream::RecreateWebRtcStream()
stream_ = call_->CreateVideoSendStream(std::move(config), parameters_.encoder_config.Copy());
./call/call.cc
webrtc::VideoSendStream* Call::CreateVideoSendStream(webrtc::VideoSendStream::Config config,
VideoEncoderConfig encoder_config, std::unique_ptr<FecController> fec_controller)
VideoSendStream* send_stream = new VideoSendStream(clock_, num_cpu_cores_,
module_process_thread_.get(), task_queue_factory_, call_stats_.get(), transport_send_ptr_,
bitrate_allocator_.get(), video_send_delay_stats_.get(), event_log_, std::move(config),
std::move(encoder_config), suspended_video_send_ssrcs_, suspended_video_payload_states_,
std::move(fec_controller));
./video/video_send_stream.cc
VideoSendStream::VideoSendStream
// This is where the video stream encoder is created; the encoder is attached via AddOrUpdateSink
// to the VideoSource (e.g. the camera or screen-capture object). See flow 1 below
video_stream_encoder_ = CreateVideoStreamEncoder(clock, task_queue_factory, num_cpu_cores,
&stats_proxy_, config_.encoder_settings);
worker_queue_->PostTask(ToQueuedTask(
[this, clock, call_stats, transport, bitrate_allocator, send_delay_stats,
event_log, &suspended_ssrcs, &encoder_config, &suspended_payload_states,
&fec_controller]() {
// The send-stream object is created here; it ties together source, encoding, video processing and network sending
// See flow 2 below
send_stream_.reset(new VideoSendStreamImpl(
clock, &stats_proxy_, worker_queue_, call_stats, transport,
bitrate_allocator, send_delay_stats, video_stream_encoder_.get(),
event_log, &config_, encoder_config.max_bitrate_bps,
encoder_config.bitrate_priority, suspended_ssrcs,
suspended_payload_states, encoder_config.content_type,
std::move(fec_controller)));
},
[this]() { thread_sync_event_.Set(); }));
1. CreateVideoStreamEncoder flow
./api/video/video_stream_encoder_create.cc
std::unique_ptr<VideoStreamEncoderInterface> CreateVideoStreamEncoder(
Clock* clock, TaskQueueFactory* task_queue_factory, uint32_t number_of_cores,
VideoStreamEncoderObserver* encoder_stats_observer, const VideoStreamEncoderSettings& settings) {
return std::make_unique<VideoStreamEncoder>(
clock, number_of_cores, encoder_stats_observer, settings,
std::make_unique<OveruseFrameDetector>(encoder_stats_observer), task_queue_factory);
}
./video/video_stream_encoder.cc
VideoStreamEncoder::VideoStreamEncoder
2. VideoSendStreamImpl flow
./video/video_send_stream_impl.cc
VideoSendStreamImpl::VideoSendStreamImpl(
Clock* clock,
SendStatisticsProxy* stats_proxy,
rtc::TaskQueue* worker_queue,
CallStats* call_stats,
RtpTransportControllerSendInterface* transport,
BitrateAllocatorInterface* bitrate_allocator,
SendDelayStats* send_delay_stats,
VideoStreamEncoderInterface* video_stream_encoder,
RtcEventLog* event_log,
const VideoSendStream::Config* config,
int initial_encoder_max_bitrate,
double initial_encoder_bitrate_priority,
std::map<uint32_t, RtpState> suspended_ssrcs,
std::map<uint32_t, RtpPayloadState> suspended_payload_states,
VideoEncoderConfig::ContentType content_type,
std::unique_ptr<FecController> fec_controller)
rtp_video_sender_(transport_->CreateRtpVideoSender(
suspended_ssrcs,
suspended_payload_states,
config_->rtp,
config_->rtcp_report_interval_ms,
config_->send_transport,
CreateObservers(call_stats,
&encoder_feedback_,
stats_proxy_,
send_delay_stats),
event_log,
std::move(fec_controller),
CreateFrameEncryptionConfig(config_))),
./call/rtp_transport_controller_send.cc
RtpVideoSenderInterface* RtpTransportControllerSend::CreateRtpVideoSender(
std::map<uint32_t, RtpState> suspended_ssrcs,
const std::map<uint32_t, RtpPayloadState>& states,
const RtpConfig& rtp_config,
int rtcp_report_interval_ms,
Transport* send_transport,
const RtpSenderObservers& observers,
RtcEventLog* event_log,
std::unique_ptr<FecController> fec_controller,
const RtpSenderFrameEncryptionConfig& frame_encryption_config)
video_rtp_senders_.push_back(std::make_unique<RtpVideoSender>(
clock_, suspended_ssrcs, states, rtp_config, rtcp_report_interval_ms,
send_transport, observers,
// TODO(holmer): Remove this circular dependency by injecting
// the parts of RtpTransportControllerSendInterface that are really used.
this, event_log, &retransmission_rate_limiter_, std::move(fec_controller),
frame_encryption_config.frame_encryptor,
frame_encryption_config.crypto_options));
return video_rtp_senders_.back().get();
./call/rtp_video_sender.cc
RtpVideoSender::RtpVideoSender
For how the capture → encode → send pipeline gets built (captured data flows into the encoder,
and the encoded output then flows on to sending), see the PeerConnection::CreateSender flow above.
4.2.2.2
void PeerConnection::OnMessage(rtc::Message* msg) {
RTC_DCHECK_RUN_ON(signaling_thread());
switch (msg->message_id) {
case MSG_SET_SESSIONDESCRIPTION_SUCCESS: {
SetSessionDescriptionMsg* param =
static_cast<SetSessionDescriptionMsg*>(msg->pdata);
//------------------------------------------------------------
// See flow 4.1 above; this is what triggers sending the SDP to the remote peer
//------------------------------------------------------------
param->observer->OnSuccess();
delete param;
break;
}
... ...
}
4.2.2.3 For the creation of transport_controller_, see PeerConnection::Initialize
./pc/jsep_transport_controller.cc
JsepTransportController::MaybeStartGathering
dtls->ice_transport()->MaybeStartGathering();
// dtls->ice_transport() is the P2PTransportChannel object; see the post https://blog.csdn.net/freeabc/article/details/106142951
./api/ice_transport_factory.cc
rtc::scoped_refptr<IceTransportInterface> CreateIceTransport(
IceTransportInit init) {
return new rtc::RefCountedObject<IceTransportWithTransportChannel>(
std::make_unique<cricket::P2PTransportChannel>(
"", 0, init.port_allocator(), init.async_resolver_factory(),
init.event_log()));
}
./p2p/base/p2p_transport_channel.cc
P2PTransportChannel::MaybeStartGathering
// For CreateSession see flow 4.2.2.3.1 below
// For AddAllocatorSession see flow 4.2.2.3.2 below
AddAllocatorSession(allocator_->CreateSession(transport_name(), component(), ice_parameters_.ufrag,
ice_parameters_.pwd));
// See flow 4.2.2.3.3 below
allocator_sessions_.back()->StartGettingPorts();
// 4.2.2.3.1
// Definition of CreateSession
./p2p/base/port_allocator.cc
std::unique_ptr<PortAllocatorSession> PortAllocator::CreateSession(
const std::string& content_name,
int component,
const std::string& ice_ufrag,
const std::string& ice_pwd) {
CheckRunOnValidThreadAndInitialized();
auto session = std::unique_ptr<PortAllocatorSession>(
CreateSessionInternal(content_name, component, ice_ufrag, ice_pwd));
session->SetCandidateFilter(candidate_filter());
return session;
}
./p2p/client/basic_port_allocator.cc
PortAllocatorSession* BasicPortAllocator::CreateSessionInternal(
const std::string& content_name,
int component,
const std::string& ice_ufrag,
const std::string& ice_pwd) {
CheckRunOnValidThreadAndInitialized();
PortAllocatorSession* session = new BasicPortAllocatorSession(
this, content_name, component, ice_ufrag, ice_pwd);
session->SignalIceRegathering.connect(this,
&BasicPortAllocator::OnIceRegathering);
return session;
}
// 4.2.2.3.2
P2PTransportChannel::AddAllocatorSession
regathering_controller_->set_allocator_session(allocator_session());
PruneAllPorts();
// 4.2.2.3.3
./p2p/client/basic_port_allocator.cc
void BasicPortAllocatorSession::StartGettingPorts() {
RTC_DCHECK_RUN_ON(network_thread_);
state_ = SessionState::GATHERING;
if (!socket_factory_) {
// This is the packet socket factory
owned_socket_factory_.reset(
new rtc::BasicPacketSocketFactory(network_thread_));
socket_factory_ = owned_socket_factory_.get();
}
network_thread_->Post(RTC_FROM_HERE, this, MSG_CONFIG_START);
RTC_LOG(LS_INFO) << "Start getting ports with turn_port_prune_policy "
<< turn_port_prune_policy_;
}
// This function is driven repeatedly by the message loop
void BasicPortAllocatorSession::OnMessage(rtc::Message* message) {
switch (message->message_id) {
case MSG_CONFIG_START:
GetPortConfigurations();
break;
case MSG_CONFIG_READY:
OnConfigReady(static_cast<PortConfiguration*>(message->pdata));
break;
case MSG_ALLOCATE:
OnAllocate();
break;
case MSG_SEQUENCEOBJECTS_CREATED:
OnAllocationSequenceObjectsCreated();
break;
case MSG_CONFIG_STOP:
OnConfigStop();
break;
default:
RTC_NOTREACHED();
}
}
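The OnMessage dispatch above is the standard rtc::Thread message-loop pattern used throughout this allocation code. A standalone sketch of the pattern (MSG_PING and Pinger are made up):

#include "rtc_base/message_handler.h"
#include "rtc_base/thread.h"

enum { MSG_PING = 1 };  // hypothetical message id

// Hypothetical handler, same pattern as BasicPortAllocatorSession::OnMessage.
class Pinger : public rtc::MessageHandler {
 public:
  void OnMessage(rtc::Message* msg) override {
    switch (msg->message_id) {
      case MSG_PING:
        // do one step of work, then optionally re-post to keep looping
        break;
    }
  }
};

void Demo(rtc::Thread* network_thread, Pinger* pinger) {
  // Post() queues the message; OnMessage runs later on |network_thread|,
  // exactly like the MSG_CONFIG_START post in StartGettingPorts().
  network_thread->Post(RTC_FROM_HERE, pinger, MSG_PING);
  // PostDelayed() is what AllocationSequence uses to step through its phases.
  network_thread->PostDelayed(RTC_FROM_HERE, /*delay_ms=*/100, pinger, MSG_PING);
}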
The following functions then execute in order:
void BasicPortAllocatorSession::GetPortConfigurations()
void BasicPortAllocatorSession::ConfigReady(PortConfiguration* config)
void BasicPortAllocatorSession::OnConfigReady(PortConfiguration* config)
void BasicPortAllocatorSession::AllocatePorts()
void BasicPortAllocatorSession::OnAllocate()
void BasicPortAllocatorSession::DoAllocate(bool disable_equivalent)
AllocationSequence* sequence =
new AllocationSequence(this, networks[i], config, sequence_flags);
sequence->SignalPortAllocationComplete.connect(this, &BasicPortAllocatorSession::OnPortAllocationComplete);
sequence->Init();
sequence->Start();
sequences_.push_back(sequence);
// Execution then continues into:
./p2p/client/basic_port_allocator.cc
void AllocationSequence::Init()
udp_socket_.reset(session_->socket_factory()->CreateUdpSocket(
rtc::SocketAddress(network_->GetBestIP(), 0),
session_->allocator()->min_port(), session_->allocator()->max_port()));
if (udp_socket_) {
udp_socket_->SignalReadPacket.connect(this, &AllocationSequence::OnReadPacket);
void AllocationSequence::Start()
// This function runs repeatedly, once per allocation phase
void AllocationSequence::OnMessage(rtc::Message* msg) {
RTC_DCHECK(rtc::Thread::Current() == session_->network_thread());
RTC_DCHECK(msg->message_id == MSG_ALLOCATION_PHASE);
const char* const PHASE_NAMES[kNumPhases] = {"Udp", "Relay", "Tcp"};
// Perform all of the phases in the current step.
RTC_LOG(LS_INFO) << network_->ToString() << ": Allocation Phase=" << PHASE_NAMES[phase_];
switch (phase_) {
case PHASE_UDP:
CreateUDPPorts();
CreateStunPorts();
break;
case PHASE_RELAY:
CreateRelayPorts();
break;
case PHASE_TCP:
CreateTCPPorts();
state_ = kCompleted;
break;
default:
RTC_NOTREACHED();
}
if (state() == kRunning) {
++phase_;
session_->network_thread()->PostDelayed(RTC_FROM_HERE,
session_->allocator()->step_delay(),
this, MSG_ALLOCATION_PHASE);
} else {
// If all phases in AllocationSequence are completed, no allocation
// steps needed further. Canceling pending signal.
session_->network_thread()->Clear(this, MSG_ALLOCATION_PHASE);
SignalPortAllocationComplete(this);
}
}
// This is where socket creation and packet reception come in; see https://blog.csdn.net/freeabc/article/details/106142951 for details
// Once all of this is set up, STUN probing begins; see https://blog.csdn.net/freeabc/article/details/106000923 for details
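All of this gathering starts from whatever STUN/TURN servers were placed into the RTCConfiguration when the PeerConnection was created (flow 7.2.1). A minimal native-side sketch, with placeholder server URLs:

#include "api/peer_connection_interface.h"

webrtc::PeerConnectionInterface::RTCConfiguration MakeIceConfig() {
  webrtc::PeerConnectionInterface::RTCConfiguration config;

  // STUN server: used by the PHASE_UDP step (CreateStunPorts) above.
  webrtc::PeerConnectionInterface::IceServer stun;
  stun.urls.push_back("stun:stun.example.org:3478");  // placeholder URL
  config.servers.push_back(stun);

  // TURN server: used by the PHASE_RELAY step (CreateRelayPorts) above.
  webrtc::PeerConnectionInterface::IceServer turn;
  turn.urls.push_back("turn:turn.example.org:3478");  // placeholder URL
  turn.username = "user";
  turn.password = "pass";
  config.servers.push_back(turn);

  // Gathered candidates are reported back through
  // PeerConnectionObserver::OnIceCandidate and sent over signaling.
  return config;
}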