PacketReceiver 在 call.h 里面有定义，我们把接收到的数据交给 DeliverPacket 处理即可。
// Interface (declared in call.h) through which received network data is
// handed to a Call. Every incoming packet goes through DeliverPacket().
class PacketReceiver {
public:
// Outcome of delivering a single packet.
enum DeliveryStatus {
DELIVERY_OK,
// No receive stream is registered for the packet's SSRC.
DELIVERY_UNKNOWN_SSRC,
// The packet was malformed or the receiving stream rejected it.
DELIVERY_PACKET_ERROR,
};
// Delivers one received packet (RTP or RTCP) of the given media type.
// |packet|/|length| is the raw wire data; |packet_time| is the receive
// timestamp supplied by the network layer.
virtual DeliveryStatus DeliverPacket(MediaType media_type,
const uint8_t* packet,
size_t length,
const PacketTime& packet_time) = 0;
protected:
// Protected non-virtual-public dtor: callers never own a PacketReceiver.
virtual ~PacketReceiver() {}
};
call.cc里面 首先判断是不是rtcp,如果是rtcp 走rtcp的流程,我们先看视频rtp的处理流程 DeliverRtp
// Single entry point for all received packets: RTCP traffic is routed to
// DeliverRtcp(); everything else is treated as RTP and goes to DeliverRtp().
PacketReceiver::DeliveryStatus Call::DeliverPacket(
    MediaType media_type,
    const uint8_t* packet,
    size_t length,
    const PacketTime& packet_time) {
  // TODO(solenberg): Tests call this function on a network thread, libjingle
  // calls on the worker thread. We should move towards always using a network
  // thread. Then this check can be enabled.
  // RTC_DCHECK(!configuration_thread_checker_.CalledOnValidThread());
  const bool is_rtcp = RtpHeaderParser::IsRtcp(packet, length);
  return is_rtcp ? DeliverRtcp(media_type, packet, length)
                 : DeliverRtp(media_type, packet, length, packet_time);
}
进入真正的处理过程 call.cc
首先拿到ssrc,判断媒体类型, 然后根据ssrc,找到对应的VideoReceiveStream(这个是在Call::CreateVideoReceiveStream 里创建的),
// Routes one received RTP packet to the receive stream registered for its
// SSRC: audio streams are tried first, then video, then FlexFEC protection
// streams. Returns DELIVERY_UNKNOWN_SSRC when no stream claims the packet.
PacketReceiver::DeliveryStatus Call::DeliverRtp(MediaType media_type,
                                                const uint8_t* packet,
                                                size_t length,
                                                const PacketTime& packet_time) {
  TRACE_EVENT0("webrtc", "Call::DeliverRtp");
  // Minimum RTP header size (the RFC 3550 fixed header is 12 bytes).
  if (length < 12)
    return DELIVERY_PACKET_ERROR;
  // The SSRC lives in network byte order at offset 8 of the fixed header.
  // BUGFIX: ByteReader is a class template; the text had dropped the
  // <uint32_t> template argument, which does not compile.
  uint32_t ssrc = ByteReader<uint32_t>::ReadBigEndian(&packet[8]);
  ReadLockScoped read_lock(*receive_crit_);
  if (media_type == MediaType::ANY || media_type == MediaType::AUDIO) {
    auto it = audio_receive_ssrcs_.find(ssrc);
    if (it != audio_receive_ssrcs_.end()) {
      received_bytes_per_second_counter_.Add(static_cast<int>(length));
      received_audio_bytes_per_second_counter_.Add(static_cast<int>(length));
      auto status = it->second->DeliverRtp(packet, length, packet_time)
                        ? DELIVERY_OK
                        : DELIVERY_PACKET_ERROR;
      if (status == DELIVERY_OK)
        event_log_->LogRtpHeader(kIncomingPacket, media_type, packet, length);
      return status;
    }
  }
  if (media_type == MediaType::ANY || media_type == MediaType::VIDEO) {
    auto it = video_receive_ssrcs_.find(ssrc);
    if (it != video_receive_ssrcs_.end()) {
      received_bytes_per_second_counter_.Add(static_cast<int>(length));
      received_video_bytes_per_second_counter_.Add(static_cast<int>(length));
      auto status = it->second->DeliverRtp(packet, length, packet_time)
                        ? DELIVERY_OK
                        : DELIVERY_PACKET_ERROR;
      // Deliver media packets to FlexFEC subsystem.
      // BUGFIX: the loop iterator was also named |it|, shadowing the outer
      // |it| above (-Wshadow); renamed to |fec_it| for clarity.
      auto fec_bounds = flexfec_receive_ssrcs_media_.equal_range(ssrc);
      for (auto fec_it = fec_bounds.first; fec_it != fec_bounds.second;
           ++fec_it)
        fec_it->second->AddAndProcessReceivedPacket(packet, length);
      if (status == DELIVERY_OK)
        event_log_->LogRtpHeader(kIncomingPacket, media_type, packet, length);
      return status;
    }
  }
  if (media_type == MediaType::ANY || media_type == MediaType::VIDEO) {
    auto it = flexfec_receive_ssrcs_protection_.find(ssrc);
    if (it != flexfec_receive_ssrcs_protection_.end()) {
      auto status = it->second->AddAndProcessReceivedPacket(packet, length)
                        ? DELIVERY_OK
                        : DELIVERY_PACKET_ERROR;
      if (status == DELIVERY_OK)
        event_log_->LogRtpHeader(kIncomingPacket, media_type, packet, length);
      return status;
    }
  }
  return DELIVERY_UNKNOWN_SSRC;
}
很自然进入VideoReceiveStream.cc
// Thin forwarding shim: the video stream delegates all RTP handling to its
// RtpStreamReceiver and returns that receiver's success/failure result.
bool VideoReceiveStream::DeliverRtp(const uint8_t* packet,
size_t length,
const PacketTime& packet_time) {
return rtp_stream_receiver_.DeliverRtp(packet, length, packet_time);
}
进入rtp_stream_receiver.cc
在此解析rtp头,rtp_header_parser_->Parse
通知接收端带宽估计, remote_bitrate_estimator_->IncomingPacket
判断是否按照顺序,IsPacketInOrder
// Processes one raw RTP packet for this stream: parses the header, feeds the
// remote bitrate estimator, detects reordering, dispatches the payload via
// ReceivePacket(), and finally updates receive statistics. Returns false if
// the stream is not receiving or the header fails to parse.
bool RtpStreamReceiver::DeliverRtp(const uint8_t* rtp_packet,
size_t rtp_packet_length,
const PacketTime& packet_time) {
RTC_DCHECK(remote_bitrate_estimator_);
{
// Scoped lock: only |receiving_| is read under it; parsing happens outside.
rtc::CritScope lock(&receive_cs_);
if (!receiving_) {
return false;
}
}
RTPHeader header;
if (!rtp_header_parser_->Parse(rtp_packet, rtp_packet_length,
&header)) {
return false;
}
size_t payload_length = rtp_packet_length - header.headerLength;
int64_t arrival_time_ms;
int64_t now_ms = clock_->TimeInMilliseconds();
// Prefer the network layer's receive timestamp (microseconds, -1 if unset),
// rounded to milliseconds; otherwise fall back to the local clock.
if (packet_time.timestamp != -1)
arrival_time_ms = (packet_time.timestamp + 500) / 1000;
else
arrival_time_ms = now_ms;
{
// Periodically log the RTP header of incoming packets.
rtc::CritScope lock(&receive_cs_);
if (now_ms - last_packet_log_ms_ > kPacketLogIntervalMs) {
std::stringstream ss;
ss << "Packet received on SSRC: " << header.ssrc << " with payload type: "
<< static_cast<int>(header.payloadType) << ", timestamp: "
<< header.timestamp << ", sequence number: " << header.sequenceNumber
<< ", arrival time: " << arrival_time_ms;
if (header.extension.hasTransmissionTimeOffset)
ss << ", toffset: " << header.extension.transmissionTimeOffset;
if (header.extension.hasAbsoluteSendTime)
ss << ", abs send time: " << header.extension.absoluteSendTime;
LOG(LS_INFO) << ss.str();
last_packet_log_ms_ = now_ms;
}
}
// Feed the receive-side bandwidth estimator (payload size only, no header).
remote_bitrate_estimator_->IncomingPacket(arrival_time_ms, payload_length,
header);
header.payload_type_frequency = kVideoPayloadTypeFrequency;
// Determine ordering BEFORE handing the packet on, so retransmission
// detection below sees the pre-packet state.
bool in_order = IsPacketInOrder(header);
rtp_payload_registry_.SetIncomingPayloadType(header);
bool ret = ReceivePacket(rtp_packet, rtp_packet_length, header, in_order);
// Update receive statistics after ReceivePacket.
// Receive statistics will be reset if the payload type changes (make sure
// that the first packet is included in the stats).
rtp_receive_statistics_->IncomingPacket(
header, rtp_packet_length, IsPacketRetransmitted(header, in_order));
return ret;
}
还是进入rtp_stream_receiver.cc
按照payloadtype 判断数据包的合法性
由此进入rtp_rtcp 模块
bool RtpStreamReceiver::ReceivePacket(const uint8_t* packet,
size_t packet_length,
const RTPHeader& header,
bool in_order) {
if (rtp_payload_registry_.IsEncapsulated(header)) {
return ParseAndHandleEncapsulatingHeader(packet, packet_length, header);
}
const uint8_t* payload = packet + header.headerLength;
assert(packet_length >= header.headerLength);
size_t payload_length = packet_length - header.headerLength;
PayloadUnion payload_specific;
if (!rtp_payload_registry_.GetPayloadSpecifics(header.payloadType,
&payload_specific)) {
return false;
}
return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
payload_specific, in_order);
}
进入rtp_receiver_impl.cc
为什么只有 in_order（按序到达）的包才去更新最近收到的时间戳和序列号？
// NOTE(review): this is an excerpt — the article elides most of the body with
// "......" and the braces below do not balance as a result. Code is kept
// verbatim; only comments are added.
bool RtpReceiverImpl::IncomingRtpPacket(
const RTPHeader& rtp_header,
const uint8_t* payload,
size_t payload_length,
PayloadUnion payload_specific,
bool in_order) {
......
// Hand the payload to the media-specific (audio/video) receiver for parsing.
int32_t ret_val = rtp_media_receiver_->ParseRtpPacket(
&webrtc_rtp_header, payload_specific, is_red, payload, payload_length,
clock_->TimeInMilliseconds(), is_first_packet_in_frame);
......
// Only in-order packets update the "last received" bookkeeping, so that a
// late, reordered packet cannot drag the latest-frame state backwards.
if (in_order) {
if (last_received_timestamp_ != rtp_header.timestamp) {
last_received_timestamp_ = rtp_header.timestamp;
last_received_frame_time_ms_ = clock_->TimeInMilliseconds();
}
last_received_sequence_number_ = rtp_header.sequenceNumber;
}
}
return true;
}
经rtp_receiver_video.cc
RTPReceiverVideo::ParseRtpPacket
RtpStreamReceiver::OnReceivedPayloadData
VideoReceiver::IncomingPacket
进入VCMReceiver::InsertPacket
进入jitter_buffer
int32_t VCMReceiver::InsertPacket(const VCMPacket& packet) {
// Insert the packet into the jitter buffer. The packet can either be empty or
// contain media at this point.
bool retransmitted = false;
const VCMFrameBufferEnum ret =
jitter_buffer_.InsertPacket(packet, &retransmitted);
if (ret == kOldPacket) {
return VCM_OK;
} else if (ret == kFlushIndicator) {
return VCM_FLUSH_INDICATOR;
} else if (ret < 0) {
return VCM_JITTER_BUFFER_ERROR;
}
if (ret == kCompleteSession && !retransmitted) {
// We don't want to include timestamps which have suffered from
// retransmission here, since we compensate with extra retransmission
// delay within the jitter estimate.
timing_->IncomingTimestamp(packet.timestamp, clock_->TimeInMilliseconds());
}
return VCM_OK;
}
这得仔细看看jitter buffer 的重排 nack
// Inserts one RTP packet into the jitter buffer: drops/accounts old packets,
// finds (or allocates) the frame buffer for the packet's timestamp, inserts
// the packet into it, maintains the NACK list and jitter estimate, and files
// the frame into the decodable or incomplete list depending on continuity.
// Sets |*retransmitted| when the frame has been NACKed at least once.
VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(const VCMPacket& packet,
bool* retransmitted) {
CriticalSectionScoped cs(crit_sect_);
++num_packets_;
if (num_packets_ == 1) {
time_first_packet_ms_ = clock_->TimeInMilliseconds();
}
// Does this packet belong to an old frame?
if (last_decoded_state_.IsOldPacket(&packet)) {
// Account only for media packets.
if (packet.sizeBytes > 0) {
num_discarded_packets_++;
num_consecutive_old_packets_++;
if (stats_callback_ != NULL)
stats_callback_->OnDiscardedPacketsUpdated(num_discarded_packets_);
}
// Update last decoded sequence number if the packet arrived late and
// belongs to a frame with a timestamp equal to the last decoded
// timestamp.
last_decoded_state_.UpdateOldPacket(&packet);
DropPacketsFromNackList(last_decoded_state_.sequence_num());
// Also see if this old packet made more incomplete frames continuous.
FindAndInsertContinuousFramesWithState(last_decoded_state_);
// A long run of old packets suggests the sender/receiver are badly out of
// sync; flush and start over rather than keep discarding.
if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
LOG(LS_WARNING)
<< num_consecutive_old_packets_
<< " consecutive old packets received. Flushing the jitter buffer.";
Flush();
return kFlushIndicator;
}
return kOldPacket;
}
num_consecutive_old_packets_ = 0;
// Locate (or allocate) the frame buffer this packet belongs to, keyed by
// RTP timestamp.
VCMFrameBuffer* frame;
FrameList* frame_list;
const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
if (error != kNoError)
return error;
int64_t now_ms = clock_->TimeInMilliseconds();
// We are keeping track of the first and latest seq numbers, and
// the number of wraps to be able to calculate how many packets we expect.
if (first_packet_since_reset_) {
// Now it's time to start estimating jitter
// reset the delay estimate.
inter_frame_delay_.Reset(now_ms);
}
// Empty packets may bias the jitter estimate (lacking size component),
// therefore don't let empty packet trigger the following updates:
if (packet.frameType != kEmptyFrame) {
if (waiting_for_completion_.timestamp == packet.timestamp) {
// This can get bad if we have a lot of duplicate packets,
// we will then count some packet multiple times.
waiting_for_completion_.frame_size += packet.sizeBytes;
waiting_for_completion_.latest_packet_time = now_ms;
} else if (waiting_for_completion_.latest_packet_time >= 0 &&
waiting_for_completion_.latest_packet_time + 2000 <= now_ms) {
// A packet should never be more than two seconds late
UpdateJitterEstimate(waiting_for_completion_, true);
waiting_for_completion_.latest_packet_time = -1;
waiting_for_completion_.frame_size = 0;
waiting_for_completion_.timestamp = 0;
}
}
VCMFrameBufferStateEnum previous_state = frame->GetState();
// Insert packet.
FrameData frame_data;
frame_data.rtt_ms = rtt_ms_;
frame_data.rolling_average_packets_per_frame = average_packets_per_frame_;
VCMFrameBufferEnum buffer_state =
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
if (previous_state != kStateComplete) {
TRACE_EVENT_ASYNC_BEGIN1("webrtc", "Video", frame->TimeStamp(), "timestamp",
frame->TimeStamp());
}
// buffer_state > 0 means the packet was accepted; update bitrate and NACK
// bookkeeping only in that case.
if (buffer_state > 0) {
incoming_bit_count_ += packet.sizeBytes << 3;
if (first_packet_since_reset_) {
latest_received_sequence_number_ = packet.seqNum;
first_packet_since_reset_ = false;
} else {
if (IsPacketRetransmitted(packet)) {
frame->IncrementNackCount();
}
// If the NACK list cannot represent the gap and this is not a key frame,
// signal a flush so the decoder can re-sync.
if (!UpdateNackList(packet.seqNum) &&
packet.frameType != kVideoFrameKey) {
buffer_state = kFlushIndicator;
}
latest_received_sequence_number_ =
LatestSequenceNumber(latest_received_sequence_number_, packet.seqNum);
}
}
// Is the frame already in the decodable list?
bool continuous = IsContinuous(*frame);
switch (buffer_state) {
case kGeneralError:
case kTimeStampError:
case kSizeError: {
RecycleFrameBuffer(frame);
break;
}
case kCompleteSession: {
if (previous_state != kStateDecodable &&
previous_state != kStateComplete) {
CountFrame(*frame);
if (continuous) {
// Signal that we have a complete session.
frame_event_->Set();
}
}
FALLTHROUGH();
}
// Note: There is no break here - continuing to kDecodableSession.
case kDecodableSession: {
*retransmitted = (frame->GetNackCount() > 0);
if (continuous) {
decodable_frames_.InsertFrame(frame);
FindAndInsertContinuousFrames(*frame);
} else {
incomplete_frames_.InsertFrame(frame);
// If NACKs are enabled, keyframes are triggered by |GetNackList|.
if (nack_mode_ == kNoNack &&
NonContinuousOrIncompleteDuration() >
90 * kMaxDiscontinuousFramesTime) {
return kFlushIndicator;
}
}
break;
}
case kIncomplete: {
if (frame->GetState() == kStateEmpty &&
last_decoded_state_.UpdateEmptyFrame(frame)) {
RecycleFrameBuffer(frame);
return kNoError;
} else {
incomplete_frames_.InsertFrame(frame);
// If NACKs are enabled, keyframes are triggered by |GetNackList|.
if (nack_mode_ == kNoNack &&
NonContinuousOrIncompleteDuration() >
90 * kMaxDiscontinuousFramesTime) {
return kFlushIndicator;
}
}
break;
}
case kNoError:
case kOutOfBoundsPacket:
case kDuplicatePacket: {
// Put back the frame where it came from.
if (frame_list != NULL) {
frame_list->InsertFrame(frame);
} else {
RecycleFrameBuffer(frame);
}
++num_duplicated_packets_;
break;
}
case kFlushIndicator:
RecycleFrameBuffer(frame);
return kFlushIndicator;
default:
assert(false);
}
return buffer_state;
}
都在这里了,
// Does this packet belong to an old frame?
if (last_decoded_state_.IsOldPacket(&packet))
首先判断是不是已经过时的了
如果是正常的数据
const VCMFrameBufferEnum error = GetFrame(packet, &frame, &frame_list);
这个函数值得看看
// Gets frame to use for this timestamp. If no match, get empty frame.
// Gets frame to use for this timestamp. If no match, get empty frame.
// A packet belongs to an existing frame iff the RTP timestamps match; the
// incomplete list is searched before the decodable list. When the free pool
// is exhausted, frames are recycled until a key frame is found.
VCMFrameBufferEnum VCMJitterBuffer::GetFrame(const VCMPacket& packet,
                                             VCMFrameBuffer** frame,
                                             FrameList** frame_list) {
  if ((*frame = incomplete_frames_.PopFrame(packet.timestamp)) != NULL) {
    *frame_list = &incomplete_frames_;
    return kNoError;
  }
  if ((*frame = decodable_frames_.PopFrame(packet.timestamp)) != NULL) {
    *frame_list = &decodable_frames_;
    return kNoError;
  }
  // No existing frame matched; hand out a fresh empty frame.
  *frame_list = NULL;
  *frame = GetEmptyFrame();
  if (*frame == NULL) {
    // No free frame! Try to reclaim some...
    LOG(LS_WARNING) << "Unable to get empty frame; Recycling.";
    const bool found_key_frame = RecycleFramesUntilKeyFrame();
    *frame = GetEmptyFrame();
    RTC_CHECK(*frame);
    if (!found_key_frame) {
      // Recycling dropped frames without reaching a key frame: the buffer
      // must be flushed, so give the frame straight back.
      RecycleFrameBuffer(*frame);
      return kFlushIndicator;
    }
  }
  (*frame)->Reset();
  return kNoError;
}
看看这个rtp包是不是之前帧的一部分(同一帧的rtp timestamp 是一样的, 包序号不一样),如果是则返回列表,插入包
如果不是则返回一个新的帧,插入包
插入包的调用
VCMFrameBufferEnum buffer_state =
frame->InsertPacket(packet, now_ms, decode_error_mode_, frame_data);
经过VCMFrameBufferEnum VCMFrameBuffer::InsertPacket
进入VCMSessionInfo::InsertPacket
// Inserts one media packet into this session (frame) in sequence-number
// order, copies its payload into |frame_buffer|, and updates the session's
// completeness/decodability state. Returns the number of payload bytes
// inserted, or a negative code: -1 packet limit reached, -2 duplicate,
// -3 sequence number outside the frame's boundaries.
int VCMSessionInfo::InsertPacket(const VCMPacket& packet,
uint8_t* frame_buffer,
VCMDecodeErrorMode decode_error_mode,
const FrameData& frame_data) {
if (packet.frameType == kEmptyFrame) {
// Update sequence number of an empty packet.
// Only media packets are inserted into the packet list.
InformOfEmptyPacket(packet.seqNum);
return 0;
}
if (packets_.size() == kMaxPacketsInSession) {
LOG(LS_ERROR) << "Max number of packets per frame has been reached.";
return -1;
}
// Find the position of this packet in the packet list in sequence number
// order and insert it. Loop over the list in reverse order.
ReversePacketIterator rit = packets_.rbegin();
for (; rit != packets_.rend(); ++rit)
if (LatestSequenceNumber(packet.seqNum, (*rit).seqNum) == packet.seqNum)
break;
// Check for duplicate packets.
if (rit != packets_.rend() && (*rit).seqNum == packet.seqNum &&
(*rit).sizeBytes > 0)
return -2;
if (packet.codec == kVideoCodecH264) {
// For H.264 the frame boundaries are tracked from the first-packet flag
// and marker bit, keeping the lowest first and highest last seq numbers.
frame_type_ = packet.frameType;
if (packet.isFirstPacket &&
(first_packet_seq_num_ == -1 ||
IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum))) {
first_packet_seq_num_ = packet.seqNum;
}
if (packet.markerBit &&
(last_packet_seq_num_ == -1 ||
IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_))) {
last_packet_seq_num_ = packet.seqNum;
}
} else {
// Only insert media packets between first and last packets (when
// available).
// Placing check here, as to properly account for duplicate packets.
// Check if this is first packet (only valid for some codecs)
// Should only be set for one packet per session.
if (packet.isFirstPacket && first_packet_seq_num_ == -1) {
// The first packet in a frame signals the frame type.
frame_type_ = packet.frameType;
// Store the sequence number for the first packet.
first_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (first_packet_seq_num_ != -1 &&
IsNewerSequenceNumber(first_packet_seq_num_, packet.seqNum)) {
LOG(LS_WARNING) << "Received packet with a sequence number which is out "
"of frame boundaries";
return -3;
} else if (frame_type_ == kEmptyFrame && packet.frameType != kEmptyFrame) {
// Update the frame type with the type of the first media packet.
// TODO(mikhal): Can this trigger?
frame_type_ = packet.frameType;
}
// Track the marker bit, should only be set for one packet per session.
if (packet.markerBit && last_packet_seq_num_ == -1) {
last_packet_seq_num_ = static_cast<int>(packet.seqNum);
} else if (last_packet_seq_num_ != -1 &&
IsNewerSequenceNumber(packet.seqNum, last_packet_seq_num_)) {
LOG(LS_WARNING) << "Received packet with a sequence number which is out "
"of frame boundaries";
return -3;
}
}
// The insert operation invalidates the iterator |rit|.
PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
// Copy the payload into the frame buffer at this packet's position.
size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);
// Re-evaluate whether the session now forms a complete frame.
UpdateCompleteSession();
if (decode_error_mode == kWithErrors)
decodable_ = true;
else if (decode_error_mode == kSelectiveErrors)
UpdateDecodableSession(frame_data);
return static_cast<int>(returnLength);
}
这个代码注释比较明确了,找好位置插入
// The insert operation invalidates the iterator |rit|.
PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
这个是处理视频数据,看里面注释
size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);
UpdateCompleteSession();
顺便判断一下是否完整的一帧
// Copies the packet's payload into |frame_buffer| at the position dictated by
// its place in the packet list, shifting subsequent packets to make room.
// H.264 STAP-A packets are rewritten on the way in: the two-byte NALU length
// fields are removed and (optionally) Annex B start codes are inserted.
// Returns the number of bytes stored for this packet.
size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
                                    PacketIterator packet_it) {
  VCMPacket& packet = *packet_it;
  PacketIterator it;
  // Calculate the offset into the frame buffer for this packet: the sum of
  // the sizes of every packet preceding it in the list.
  size_t offset = 0;
  for (it = packets_.begin(); it != packet_it; ++it)
    offset += (*it).sizeBytes;
  // Set the data pointer to pointing to the start of this packet in the
  // frame buffer.
  const uint8_t* packet_buffer = packet.dataPtr;
  packet.dataPtr = frame_buffer + offset;
  // We handle H.264 STAP-A packets in a special way as we need to remove the
  // two length bytes between each NAL unit, and potentially add start codes.
  // TODO(pbos): Remove H264 parsing from this step and use a fragmentation
  // header supplied by the H264 depacketizer.
  const size_t kH264NALHeaderLengthInBytes = 1;
  const size_t kLengthFieldLength = 2;
  if (packet.video_header.codec == kRtpVideoH264 &&
      packet.video_header.codecHeader.H264.packetization_type == kH264StapA) {
    // First pass: compute the rewritten size (payloads plus optional start
    // codes) so all subsequent packets can be shifted in a single move.
    size_t required_length = 0;
    const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
    while (nalu_ptr < packet_buffer + packet.sizeBytes) {
      size_t length = BufferToUWord16(nalu_ptr);
      required_length +=
          length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
      nalu_ptr += kLengthFieldLength + length;
    }
    ShiftSubsequentPackets(packet_it, required_length);
    // Second pass: copy each NALU into the frame buffer, dropping the length
    // fields.
    nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
    uint8_t* frame_buffer_ptr = frame_buffer + offset;
    while (nalu_ptr < packet_buffer + packet.sizeBytes) {
      size_t length = BufferToUWord16(nalu_ptr);
      nalu_ptr += kLengthFieldLength;
      // BUGFIX: the article text had dropped the <uint8_t*> template argument
      // of const_cast (invalid C++); restored.
      frame_buffer_ptr += Insert(nalu_ptr, length, packet.insertStartCode,
                                 const_cast<uint8_t*>(frame_buffer_ptr));
      nalu_ptr += length;
    }
    packet.sizeBytes = required_length;
    return packet.sizeBytes;
  }
  ShiftSubsequentPackets(
      packet_it, packet.sizeBytes +
                     (packet.insertStartCode ? kH264StartCodeLengthBytes : 0));
  // BUGFIX: restored the <uint8_t*> template argument here as well;
  // |packet.dataPtr| is const-qualified, so the cast is required by Insert().
  packet.sizeBytes =
      Insert(packet_buffer, packet.sizeBytes, packet.insertStartCode,
             const_cast<uint8_t*>(packet.dataPtr));
  return packet.sizeBytes;
}
就是把 H.264 NALU 的 RTP 封装格式替换成 Annex B 规范（start code 0x00 00 00 01）。