To understand Google's trendline estimation, WebRTC's trendline_estimator_unittest.cc is a good entry point: the unit tests show both the estimator's behavior and the meaning of its input parameters.
The TrendlineEstimator test cases:
// Normal network: the receiver's inter-arrival intervals match the sender's inter-departure intervals
TEST_F(TrendlineEstimatorTest, Normal)
// Overused network: the receiver's inter-arrival intervals are 10% longer than the send intervals
TEST_F(TrendlineEstimatorTest, Overusing)
// Underused (unsaturated) network: the receiver's inter-arrival intervals are 15% shorter than the send intervals
TEST_F(TrendlineEstimatorTest, Underusing)
// Small packets, network not saturated: the test uses 100-byte packets, below the configured 1200-byte packet size
TEST_F(TrendlineEstimatorTest, IncludesSmallPacketsByDefault)
The test body below shows what each argument passed to TrendlineEstimator means.
void RunTestUntilStateChange() {
RTC_DCHECK_EQ(send_times.size(), kPacketCount);
RTC_DCHECK_EQ(recv_times.size(), kPacketCount);
RTC_DCHECK_EQ(packet_sizes.size(), kPacketCount);
RTC_DCHECK_GE(count, 1);
RTC_DCHECK_LT(count, kPacketCount);
auto initial_state = estimator.State();
for (; count < kPacketCount; count++) {
// inter-arrival time between consecutive packets, ms
double recv_delta = recv_times[count] - recv_times[count - 1];
// inter-departure (send) time between consecutive packets, ms
double send_delta = send_times[count] - send_times[count - 1];
/* Arguments:
recv_delta: arrival-time delta vs. the previous packet, ms
send_delta: send-time delta vs. the previous packet, ms
send_times[count]: send time of the current packet, ms
recv_times[count]: arrival time of the current packet, ms
packet_sizes[count]: size of the current packet, bytes
*/
estimator.Update(recv_delta, send_delta, send_times[count],
recv_times[count], packet_sizes[count], true);
if (estimator.State() != initial_state) {
// the estimator detected a network state change; exit the test
return;
}
}
}
From the test we can see that TrendlineEstimator exposes two important external interfaces:
/**
 * Feed a new sample into the estimator.
 * recv_delta_ms: arrival-time delta vs. the previous packet group, ms (computed at the receiver, fed back to the sender)
 * send_delta_ms: send-time delta vs. the previous packet group, ms (computed at the sender)
 * send_time_ms: send time of the current packet group, ms (sender side)
 * arrival_time_ms: arrival time of the current packet group, ms (computed at the receiver, fed back)
 * packet_size: size of the current packet group, bytes
 * calculated_deltas: the unit tests pass true
 */
void TrendlineEstimator::Update(double recv_delta_ms,
double send_delta_ms,
int64_t send_time_ms,
int64_t arrival_time_ms,
size_t packet_size,
bool calculated_deltas);
enum class BandwidthUsage {
kBwNormal = 0, // normal
kBwUnderusing = 1, // underusing (unsaturated)
kBwOverusing = 2, // overusing (congested)
kLast
};
/**
 * Get the current network state.
 */
BandwidthUsage State() const override;
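To see how these two interfaces work together, here is a minimal usage sketch modeled on the Overusing test case above. The constructor arguments are an assumption (the real constructor takes a key-value config and an optional NetworkStatePredictor), and the 10% arrival-time inflation is taken from the test's description:
// Sketch: feed packets whose arrival intervals are 10% longer than their
// send intervals; queues build up and the state flips to kBwOverusing.
TrendlineEstimator estimator(&key_value_config, /*network_state_predictor=*/nullptr);
int64_t send_time_ms = 0;
int64_t recv_time_ms = 0;
for (int i = 1; i < 100; ++i) {
  const double send_delta_ms = 20;  // one packet group every 20 ms
  const double recv_delta_ms = 22;  // but received every 22 ms
  send_time_ms += 20;
  recv_time_ms += 22;
  estimator.Update(recv_delta_ms, send_delta_ms, send_time_ms, recv_time_ms,
                   /*packet_size=*/1200, /*calculated_deltas=*/true);
  if (estimator.State() == BandwidthUsage::kBwOverusing)
    break;  // congestion detected
}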
void TrendlineEstimator::Update(double recv_delta_ms,
double send_delta_ms,
int64_t send_time_ms,
int64_t arrival_time_ms,
size_t packet_size,
bool calculated_deltas) {
if (calculated_deltas) {
UpdateTrendline(recv_delta_ms, send_delta_ms, send_time_ms, arrival_time_ms,
packet_size);
}
if (network_state_predictor_) {
hypothesis_predicted_ = network_state_predictor_->Update(
send_time_ms, arrival_time_ms, hypothesis_);
}
}
void TrendlineEstimator::UpdateTrendline(double recv_delta_ms,
double send_delta_ms,
int64_t send_time_ms,
int64_t arrival_time_ms,
size_t packet_size) {
// delay gradient: the difference between the inter-arrival and inter-departure intervals
const double delta_ms = recv_delta_ms - send_delta_ms;
// bump the delta counter
++num_of_deltas_;
num_of_deltas_ = std::min(num_of_deltas_, kDeltaCounterMax); // capped at 1000
// arrival time of the first packet
if (first_arrival_time_ms_ == -1)
first_arrival_time_ms_ = arrival_time_ms;
// Exponential backoff filter.
// accumulate the delay
accumulated_delay_ += delta_ms;
BWE_TEST_LOGGING_PLOT(1, "accumulated_delay_ms", arrival_time_ms,
accumulated_delay_);
// exponentially smoothed accumulated delay, smoothing factor 0.9
smoothed_delay_ = smoothing_coef_ * smoothed_delay_ +
(1 - smoothing_coef_) * accumulated_delay_;
BWE_TEST_LOGGING_PLOT(1, "smoothed_delay_ms", arrival_time_ms,
smoothed_delay_);
// Maintain packet window
// arrival_time_ms - first_arrival_time_ms_: arrival time relative to the first packet
// store the sample (relative arrival time, smoothed accumulated delay, raw accumulated delay)
delay_hist_.emplace_back(
static_cast<double>(arrival_time_ms - first_arrival_time_ms_),
smoothed_delay_, accumulated_delay_);
// keep the history sorted by arrival time, ascending
if (settings_.enable_sort) {
for (size_t i = delay_hist_.size() - 1;
i > 0 &&
delay_hist_[i].arrival_time_ms < delay_hist_[i - 1].arrival_time_ms;
--i) {
std::swap(delay_hist_[i], delay_hist_[i - 1]);
}
}
// cap the sampling window size
if (delay_hist_.size() > settings_.window_size)
delay_hist_.pop_front();
// Simple linear regression.
double trend = prev_trend_;
// once delay_hist_ holds window_size samples, fit a line to the delay samples; the slope is the trend
if (delay_hist_.size() == settings_.window_size) {
// Update trend_ if it is possible to fit a line to the data. The delay
// trend can be seen as an estimate of (send_rate - capacity)/capacity.
// 0 < trend < 1 -> the delay increases, queues are filling up
// trend == 0 -> the delay does not change
// trend < 0 -> the delay decreases, queues are being emptied
trend = LinearFitSlope(delay_hist_).value_or(trend);
if (settings_.enable_cap) {
absl::optional<double> cap = ComputeSlopeCap(delay_hist_, settings_);
// We only use the cap to filter out overuse detections, not
// to detect additional underuses.
if (trend >= 0 && cap.has_value() && trend > cap.value()) {
trend = cap.value();
}
}
}
BWE_TEST_LOGGING_PLOT(1, "trendline_slope", arrival_time_ms, trend);
// with the delay gradient in hand, run overuse detection
Detect(trend, send_delta_ms, arrival_time_ms);
}
accumulated_delay_: the raw accumulated delay
smoothed_delay_: the exponentially smoothed accumulated delay
struct PacketTiming {
double arrival_time_ms; // arrival time
double smoothed_delay_ms; // smoothed accumulated delay
double raw_delay_ms; // raw accumulated delay
};
absl::optional<double> LinearFitSlope(
const std::deque<PacketTiming>& packets) {
RTC_DCHECK(packets.size() >= 2); // need at least two samples
// Compute the "center of mass".
// linear regression: y = k * x + b
// x: packet-group arrival time
// y: smoothed accumulated delay
double sum_x = 0;
double sum_y = 0;
for (const auto& packet : packets) {
sum_x += packet.arrival_time_ms;
sum_y += packet.smoothed_delay_ms;
}
double x_avg = sum_x / packets.size(); // mean of x
double y_avg = sum_y / packets.size(); // mean of y
// Compute the slope k = \sum (x_i-x_avg)(y_i-y_avg) / \sum (x_i-x_avg)^2
double numerator = 0;
double denominator = 0;
for (const auto& packet : packets) {
double x = packet.arrival_time_ms;
double y = packet.smoothed_delay_ms;
numerator += (x - x_avg) * (y - y_avg);
denominator += (x - x_avg) * (x - x_avg);
}
if (denominator == 0)
return absl::nullopt;
// return the fitted slope k of the delay trend:
// > 0: queuing delay is growing, congestion is building;
// = 0: the send rate matches the available bandwidth;
// < 0: the network is underused, queues are draining;
return numerator / denominator;
}
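A quick numeric check of LinearFitSlope (hypothetical sample values): for three samples (arrival_time_ms, smoothed_delay_ms) = (0, 0), (10, 2), (20, 4), we get x_avg = 10 and y_avg = 2, numerator = (0-10)(0-2) + 0 + (20-10)(4-2) = 40, denominator = (0-10)² + 0 + (20-10)² = 200, so the slope is 40 / 200 = 0.2: the queuing delay grows by 0.2 ms per ms of arrival time, a clear overuse signal.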
/**
 * trend: slope of the fitted delay-trend line
 * ts_delta: send-time delta vs. the previous packet group
 * now_ms: arrival time of the current packet group
 */
void TrendlineEstimator::Detect(double trend, double ts_delta, int64_t now_ms) {
if (num_of_deltas_ < 2) {
// need at least two deltas
hypothesis_ = BandwidthUsage::kBwNormal;
return;
}
// kMinNumDeltas: 60
// trend: the fitted slope
// threshold_gain_: 4.0
const double modified_trend =
std::min(num_of_deltas_, kMinNumDeltas) * trend * threshold_gain_;
prev_modified_trend_ = modified_trend;
BWE_TEST_LOGGING_PLOT(1, "T", now_ms, modified_trend);
BWE_TEST_LOGGING_PLOT(1, "threshold", now_ms, threshold_);
// threshold_ starts at 12.5
/**
 * Compare against the adaptive threshold threshold_ to classify the network state:
 * modified_trend > threshold_: overusing
 * modified_trend < -threshold_: underusing
 * -threshold_ <= modified_trend <= threshold_: normal
 */
if (modified_trend > threshold_) {
if (time_over_using_ == -1) {
// Initialize the timer. Assume that we've been
// over-using half of the time since the previous
// sample.
time_over_using_ = ts_delta / 2;
} else {
// Increment timer
time_over_using_ += ts_delta;
}
overuse_counter_++;
if (time_over_using_ > overusing_time_threshold_ && overuse_counter_ > 1) {
if (trend >= prev_trend_) {
time_over_using_ = 0;
overuse_counter_ = 0;
hypothesis_ = BandwidthUsage::kBwOverusing;
}
}
} else if (modified_trend < -threshold_) {
time_over_using_ = -1;
overuse_counter_ = 0;
hypothesis_ = BandwidthUsage::kBwUnderusing;
} else {
time_over_using_ = -1;
overuse_counter_ = 0;
hypothesis_ = BandwidthUsage::kBwNormal;
}
prev_trend_ = trend;
// threshold_ is adaptive; the implementation is in UpdateThreshold below
UpdateThreshold(modified_trend, now_ms);
}
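A worked example with hypothetical numbers: once num_of_deltas_ has reached 60, a fitted slope of trend = 0.06 gives modified_trend = 60 * 0.06 * 4.0 = 14.4, which exceeds the initial threshold_ of 12.5. The state does not flip immediately: time_over_using_ must exceed overusing_time_threshold_, more than one sample must have been over the threshold, and the trend must not be decreasing; only then does hypothesis_ become kBwOverusing.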
/**
The threshold is adapted to tune the detector's sensitivity to the delay gradient, for two main reasons:
1) The delay gradient varies over time, sometimes large and sometimes small. Against a fixed threshold it
would be either too small to detect congestion at all, or so large that the detector keeps reporting
congestion;
2) A fixed threshold would let this estimator be starved in competition with TCP flows, whose congestion
control is loss-based.
*/
void TrendlineEstimator::UpdateThreshold(double modified_trend,
int64_t now_ms) {
if (last_update_ms_ == -1)
last_update_ms_ = now_ms;
// kMaxAdaptOffsetMs : 15.0
if (fabs(modified_trend) > threshold_ + kMaxAdaptOffsetMs) {
// Avoid adapting the threshold to big latency spikes, caused e.g.,
// by a sudden capacity drop.
last_update_ms_ = now_ms;
return;
}
// k_down_: 0.039
// k_up_: 0.0087
// the adaptation gain kγ(ti) is either k_down_ or k_up_
const double k = fabs(modified_trend) < threshold_ ? k_down_ : k_up_;
const int64_t kMaxTimeDeltaMs = 100;
// time elapsed since the last threshold update, ΔT
int64_t time_delta_ms = std::min(now_ms - last_update_ms_, kMaxTimeDeltaMs);
threshold_ += k * (fabs(modified_trend) - threshold_) * time_delta_ms;
threshold_ = rtc::SafeClamp(threshold_, 6.f, 600.f);
last_update_ms_ = now_ms;
}
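Continuing the example above: with modified_trend = 14.4 and threshold_ = 12.5, |modified_trend| > threshold_, so k = k_up_ = 0.0087; after time_delta_ms = 100 the threshold grows by 0.0087 * (14.4 - 12.5) * 100 ≈ 1.65 to about 14.2, i.e., the detector desensitizes itself while the trend stays high. Once |modified_trend| drops below the threshold, the much larger k_down_ = 0.039 pulls the threshold back down quickly.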
Packet-group deltas are computed by the InterArrival class.
To understand InterArrival's usage and the meaning of its parameters, we analyze its test code, modules/remote_bitrate_estimator/inter_arrival_unittest.cc.
TEST_F(InterArrivalTest, OutOfOrderPacket) {
// G1
int64_t arrival_time = 17; // arrival time, ms
int64_t timestamp = 0; // packet timestamp (send time), us
ExpectFalse(timestamp, arrival_time, 1);
int64_t g1_timestamp = timestamp;
int64_t g1_arrival_time = arrival_time;
// G2
// Eleven packets sent at almost the same time; this group's send time is more than
// 5 ms after the previous group's, and it starts arriving 11 ms later.
// The packets' send times are only 20 us apart,
// while consecutive arrivals are 6 ms apart.
arrival_time += 11; // arrival time, ms
timestamp += kTriggerNewGroupUs; // timestamp, us; 5 ms later than G1, which separates the two groups
ExpectFalse(timestamp, 28, 2);
for (int i = 0; i < 10; ++i) {
arrival_time += kBurstThresholdMs + 1;
timestamp += kMinStep;//20us
ExpectFalse(timestamp, arrival_time, 1);
}
int64_t g2_timestamp = timestamp; // send timestamp of the last packet in G2
int64_t g2_arrival_time = arrival_time; // arrival time of the last packet in G2
// This packet is out of order and should be dropped.
// a packet from group 1 arrives out of order and must be dropped
arrival_time = 281;
ExpectFalse(g1_timestamp, arrival_time, 100);
// G3
// group 3 arrives at 500 ms; its send time is more than 5 ms after the previous group's
arrival_time = 500;
timestamp = 2 * kTriggerNewGroupUs;
ExpectTrue(timestamp, arrival_time, 100,
// Delta G2-G1
// send-time and arrival-time deltas between groups 2 and 1
g2_timestamp - g1_timestamp, g2_arrival_time - g1_arrival_time,
// size delta between groups 2 and 1
(2 + 10) - 1, 0);
}
// Test that neither inter_arrival instance complete the timestamp group from
// the given data.
void ExpectFalse(int64_t timestamp_us,
int64_t arrival_time_ms,
size_t packet_size) {
InternalExpectFalse(inter_arrival_rtp_.get(),
MakeRtpTimestamp(timestamp_us), arrival_time_ms,
packet_size);
InternalExpectFalse(inter_arrival_ast_.get(), MakeAbsSendTime(timestamp_us),
arrival_time_ms, packet_size);
}
// Test that both inter_arrival instances complete the timestamp group from
// the given data and that all returned deltas are as expected (except
// timestamp delta, which is rounded from us to different ranges and must
// match within an interval, given in |timestamp_near|.
void ExpectTrue(int64_t timestamp_us,
int64_t arrival_time_ms,
size_t packet_size,
int64_t expected_timestamp_delta_us,
int64_t expected_arrival_time_delta_ms,
int expected_packet_size_delta,
uint32_t timestamp_near) {
// convert us into an RTP-style timestamp
InternalExpectTrue(inter_arrival_rtp_.get(), MakeRtpTimestamp(timestamp_us),
arrival_time_ms, packet_size,
MakeRtpTimestamp(expected_timestamp_delta_us),
expected_arrival_time_delta_ms,
expected_packet_size_delta, timestamp_near);
// cf. AbsoluteSendTime::MsTo24Bits(now_ms)
InternalExpectTrue(inter_arrival_ast_.get(), MakeAbsSendTime(timestamp_us),
arrival_time_ms, packet_size,
MakeAbsSendTime(expected_timestamp_delta_us),
expected_arrival_time_delta_ms,
expected_packet_size_delta, timestamp_near << 8);
}
/**
timestamp: send timestamp
arrival_time_ms: arrival time
packet_size: packet size
*/
static void InternalExpectFalse(InterArrival* inter_arrival,
uint32_t timestamp,
int64_t arrival_time_ms,
size_t packet_size) {
uint32_t dummy_timestamp = 101;
int64_t dummy_arrival_time_ms = 303;
int dummy_packet_size = 909;
bool computed = inter_arrival->ComputeDeltas(
timestamp, arrival_time_ms, arrival_time_ms, packet_size,
&dummy_timestamp, &dummy_arrival_time_ms, &dummy_packet_size);
EXPECT_EQ(computed, false);
EXPECT_EQ(101ul, dummy_timestamp);
EXPECT_EQ(303, dummy_arrival_time_ms);
EXPECT_EQ(909, dummy_packet_size);
}
/**
timestamp: send timestamp
arrival_time_ms: arrival time
packet_size: packet size
expected_timestamp_delta: expected send-timestamp delta
expected_arrival_time_delta_ms: expected arrival-time delta
expected_packet_size_delta: expected packet-size delta
timestamp_near: 0 here
*/
static void InternalExpectTrue(InterArrival* inter_arrival,
uint32_t timestamp,
int64_t arrival_time_ms,
size_t packet_size,
uint32_t expected_timestamp_delta,
int64_t expected_arrival_time_delta_ms,
int expected_packet_size_delta,
uint32_t timestamp_near) {
uint32_t delta_timestamp = 101; // send-timestamp delta
int64_t delta_arrival_time_ms = 303; // arrival-time delta
int delta_packet_size = 909; // packet-size delta
bool computed = inter_arrival->ComputeDeltas(
timestamp, arrival_time_ms, arrival_time_ms, packet_size,
&delta_timestamp, &delta_arrival_time_ms, &delta_packet_size);
EXPECT_EQ(true, computed);
EXPECT_NEAR(expected_timestamp_delta, delta_timestamp, timestamp_near);
EXPECT_EQ(expected_arrival_time_delta_ms, delta_arrival_time_ms);
EXPECT_EQ(expected_packet_size_delta, delta_packet_size);
}
Given each packet's send time, arrival time, and size, InterArrival computes the send-time, arrival-time, and size deltas between packet groups; grouping is handled internally.
Data input interface: InterArrival::ComputeDeltas
/**
Decides whether a new group starts, updates the group bookkeeping, and computes the deltas.
timestamp: packet send timestamp; the unit depends on the caller (RTP or abs-send-time ticks)
arrival_time_ms: packet arrival time, ms
system_time_ms: system time; equal to arrival_time_ms in the tests
packet_size: packet size
timestamp_delta: send-timestamp delta (output)
arrival_time_delta_ms: arrival-time delta, ms (output)
packet_size_delta: packet-size delta (output)
*/
bool InterArrival::ComputeDeltas(uint32_t timestamp,
int64_t arrival_time_ms,
int64_t system_time_ms,
size_t packet_size,
uint32_t* timestamp_delta,
int64_t* arrival_time_delta_ms,
int* packet_size_delta) {
assert(timestamp_delta != NULL);
assert(arrival_time_delta_ms != NULL);
assert(packet_size_delta != NULL);
bool calculated_deltas = false;
if (current_timestamp_group_.IsFirstPacket()) {
// We don't have enough data to update the filter, so we store it until we
// have two frames of data to process.
current_timestamp_group_.timestamp = timestamp;
current_timestamp_group_.first_timestamp = timestamp;
current_timestamp_group_.first_arrival_ms = arrival_time_ms;
} else if (!PacketInOrder(timestamp)) {
return false;
} else if (NewTimestampGroup(arrival_time_ms, timestamp)) {
// First packet of a later frame, the previous frame sample is ready.
// a new group begins: compute the deltas between prev_timestamp_group_ and current_timestamp_group_
if (prev_timestamp_group_.complete_time_ms >= 0) {
*timestamp_delta =
current_timestamp_group_.timestamp - prev_timestamp_group_.timestamp;
*arrival_time_delta_ms = current_timestamp_group_.complete_time_ms -
prev_timestamp_group_.complete_time_ms;
// Check system time differences to see if we have an unproportional jump
// in arrival time. In that case reset the inter-arrival computations.
int64_t system_time_delta_ms =
current_timestamp_group_.last_system_time_ms -
prev_timestamp_group_.last_system_time_ms;
if (*arrival_time_delta_ms - system_time_delta_ms >=
kArrivalTimeOffsetThresholdMs) {
RTC_LOG(LS_WARNING)
<< "The arrival time clock offset has changed (diff = "
<< *arrival_time_delta_ms - system_time_delta_ms
<< " ms), resetting.";
Reset();
return false;
}
if (*arrival_time_delta_ms < 0) {
// The group of packets has been reordered since receiving its local
// arrival timestamp.
++num_consecutive_reordered_packets_;
if (num_consecutive_reordered_packets_ >= kReorderedResetThreshold) {
RTC_LOG(LS_WARNING)
<< "Packets are being reordered on the path from the "
"socket to the bandwidth estimator. Ignoring this "
"packet for bandwidth estimation, resetting.";
Reset();
}
return false;
} else {
num_consecutive_reordered_packets_ = 0;
}
assert(*arrival_time_delta_ms >= 0);
*packet_size_delta = static_cast<int>(current_timestamp_group_.size) -
static_cast<int>(prev_timestamp_group_.size);
calculated_deltas = true;
}
prev_timestamp_group_ = current_timestamp_group_;
// The new timestamp is now the current frame.
current_timestamp_group_.first_timestamp = timestamp;
current_timestamp_group_.timestamp = timestamp;
current_timestamp_group_.first_arrival_ms = arrival_time_ms;
current_timestamp_group_.size = 0;
} else {
current_timestamp_group_.timestamp =
LatestTimestamp(current_timestamp_group_.timestamp, timestamp);
}
// Accumulate the frame size.
current_timestamp_group_.size += packet_size;
current_timestamp_group_.complete_time_ms = arrival_time_ms;
current_timestamp_group_.last_system_time_ms = system_time_ms;
return calculated_deltas;
}
InterArrival::NewTimestampGroup
// Assumes that |timestamp| is not reordered compared to
// |current_timestamp_group_|.
/**
WebRTC does not measure the delay gradient between individual packets. Instead, packets are
batched into groups by send interval and by arrival interval, and the delay gradient is computed
between whole groups. The grouping rules are:
1. Packets whose send times are within 5 ms of each other form one group,
because WebRTC's paced sender ships a batch of packets every 5 ms.
2. Packets whose arrival times are within 5 ms of each other (a burst) form one group.
On Wi-Fi, some devices only get a chance to forward packets in fixed time slices that can be
up to 100 ms apart; packets accumulate for up to 100 ms and then go out as a burst,
and every packet in that burst is treated as one group.
*/
bool InterArrival::NewTimestampGroup(int64_t arrival_time_ms,
uint32_t timestamp) const {
if (current_timestamp_group_.IsFirstPacket()) {
return false;
} else if (BelongsToBurst(arrival_time_ms, timestamp)) {
return false;
} else {
// a new group starts when the send timestamp is more than 5 ms (kTimestampGroupLengthTicks) past the group's first timestamp
uint32_t timestamp_diff =
timestamp - current_timestamp_group_.first_timestamp;
return timestamp_diff > kTimestampGroupLengthTicks;
}
}
InterArrival::BelongsToBurst
/**
Packets whose arrival times are within 5 ms of each other are grouped as a burst.
On Wi-Fi, some devices only forward packets in fixed time slices that can be up to 100 ms apart;
the packets accumulated in that window go out as a burst, and the whole burst is treated as one group.
*/
bool InterArrival::BelongsToBurst(int64_t arrival_time_ms,
uint32_t timestamp) const {
if (!burst_grouping_) {
return false;
}
assert(current_timestamp_group_.complete_time_ms >= 0);
int64_t arrival_time_delta_ms =
arrival_time_ms - current_timestamp_group_.complete_time_ms;
uint32_t timestamp_diff = timestamp - current_timestamp_group_.timestamp;
int64_t ts_delta_ms = timestamp_to_ms_coeff_ * timestamp_diff + 0.5;
if (ts_delta_ms == 0)
return true;
int propagation_delta_ms = arrival_time_delta_ms - ts_delta_ms;
if (propagation_delta_ms < 0 &&
arrival_time_delta_ms <= kBurstDeltaThresholdMs &&
arrival_time_ms - current_timestamp_group_.first_arrival_ms <
kMaxBurstDurationMs)
return true;
return false;
}
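A worked example with hypothetical numbers: suppose the current group's last packet arrived at complete_time_ms = 100 ms, and the next packet's send-timestamp difference maps to ts_delta_ms = 4 while it arrives at 103 ms. Then arrival_time_delta_ms = 3, propagation_delta_ms = 3 - 4 = -1 < 0, the arrival delta is within kBurstDeltaThresholdMs (5 ms), and as long as the group is younger than kMaxBurstDurationMs (100 ms), the packet joins the burst instead of starting a new group.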
The TrendlineEstimator lives as a member of DelayBasedBwe; its Update interface is driven from the following entry point:
void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback,
Timestamp at_time) {
uint32_t send_time_24bits =
static_cast<uint32_t>(
((static_cast<uint64_t>(packet_feedback.sent_packet.send_time.ms())
<< kAbsSendTimeFraction) +
500) /
1000) &
0x00FFFFFF;
// Shift up send time to use the full 32 bits that inter_arrival works with,
// so wrapping works properly.
uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift;
uint32_t timestamp_delta = 0;
int64_t recv_delta_ms = 0;
int size_delta = 0;
bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas(
timestamp, packet_feedback.receive_time.ms(), at_time.ms(),
packet_size.bytes(), &timestamp_delta, &recv_delta_ms, &size_delta);
double send_delta_ms = (1000.0 * timestamp_delta) / (1 << kInterArrivalShift);
delay_detector_for_packet->Update(recv_delta_ms, send_delta_ms,
packet_feedback.sent_packet.send_time.ms(),
packet_feedback.receive_time.ms(),
packet_size.bytes(), calculated_deltas);
}
For the overall rate-control flow, DelayBasedBwe's test cases in delay_based_bwe_unittest.cc are a good reference.
Packets arriving slower than they were sent, at a high bitrate:
TEST_F(DelayBasedBweTest, ProbeDetectionSlowerArrivalHighBitrate) {
int64_t now_ms = clock_.TimeInMilliseconds();
// Burst sent at 8 * 1000 / 1 = 8000 kbps.
// Arriving at 8 * 1000 / 2 = 4000 kbps.
// Since the receive rate is significantly below the send rate, we expect to
// use 95% of the estimated capacity.
int64_t send_time_ms = 0;
for (int i = 0; i < kNumProbesCluster1; ++i) {
clock_.AdvanceTimeMilliseconds(2); // advance the clock by 2 ms
send_time_ms += 1; // one packet sent every 1 ms
now_ms = clock_.TimeInMilliseconds(); // one packet received every 2 ms
// const PacedPacketInfo kPacingInfo1(1, 8, 4000);
IncomingFeedback(now_ms, send_time_ms, 1000, kPacingInfo1);
}
EXPECT_TRUE(bitrate_observer_.updated());
EXPECT_NEAR(bitrate_observer_.latest_bitrate(),
kTargetUtilizationFraction * 4000000u, 10000u);
}
/**
arrival_time_ms: arrival time
send_time_ms: send time
payload_size: packet size
pacing_info: pacer-related info, not analyzed here
*/
void DelayBasedBweTest::IncomingFeedback(int64_t arrival_time_ms,
int64_t send_time_ms,
size_t payload_size,
const PacedPacketInfo& pacing_info) {
// check that the arrival timestamp is non-negative
// arrival_time_offset_ms_ starts at 0, not analyzed here
RTC_CHECK_GE(arrival_time_ms + arrival_time_offset_ms_, 0);
PacketResult packet; // holds the packet's metadata
// receive time
packet.receive_time =
Timestamp::Millis(arrival_time_ms + arrival_time_offset_ms_);
// send time
packet.sent_packet.send_time = Timestamp::Millis(send_time_ms);
// packet size
packet.sent_packet.size = DataSize::Bytes(payload_size);
packet.sent_packet.pacing_info = pacing_info;
if (packet.sent_packet.pacing_info.probe_cluster_id !=
PacedPacketInfo::kNotAProbe)
probe_bitrate_estimator_->HandleProbeAndEstimateBitrate(packet);
TransportPacketsFeedback msg;
// local timestamp at which the feedback packet was received
msg.feedback_time = Timestamp::Millis(clock_.TimeInMilliseconds());
// queue the packet info
msg.packet_feedbacks.push_back(packet);
// feed the acknowledged-bitrate estimator to obtain the current measured throughput
acknowledged_bitrate_estimator_->IncomingPacketFeedbackVector(
msg.SortedByReceiveTime());
// bitrate_estimator_ is the DelayBasedBwe; feed it the feedback plus the measured send rate
DelayBasedBwe::Result result =
bitrate_estimator_->IncomingPacketFeedbackVector(
msg, acknowledged_bitrate_estimator_->bitrate(),
probe_bitrate_estimator_->FetchAndResetLastEstimatedBitrate(),
/*network_estimate*/ absl::nullopt, /*in_alr*/ false);
if (result.updated) {
bitrate_observer_.OnReceiveBitrateChanged(result.target_bitrate.bps());
}
}
DelayBasedBwe::IncomingPacketFeedbackVector is the data entry point:
/**
msg: the packet feedback info
acked_bitrate: current measured (acknowledged) throughput
probe_bitrate: bitrate estimated from probing, analyzed later
network_estimate: null here
in_alr: false
*/
DelayBasedBwe::Result DelayBasedBwe::IncomingPacketFeedbackVector(
const TransportPacketsFeedback& msg,
absl::optional<DataRate> acked_bitrate,
absl::optional<DataRate> probe_bitrate,
absl::optional<NetworkStateEstimate> network_estimate,
bool in_alr) {
RTC_DCHECK_RUNS_SERIALIZED(&network_race_);
// sort by receive time, ascending
auto packet_feedback_vector = msg.SortedByReceiveTime();
// TODO(holmer): An empty feedback vector here likely means that
// all acks were too late and that the send time history had
// timed out. We should reduce the rate when this occurs.
if (packet_feedback_vector.empty()) {
RTC_LOG(LS_WARNING) << "Very late feedback received.";
return DelayBasedBwe::Result();
}
if (!uma_recorded_) {
RTC_HISTOGRAM_ENUMERATION(kBweTypeHistogram,
BweNames::kSendSideTransportSeqNum,
BweNames::kBweNamesMax);
uma_recorded_ = true;
}
bool delayed_feedback = true;
bool recovered_from_overuse = false;
// remember the current bandwidth usage state
BandwidthUsage prev_detector_state = active_delay_detector_->State();
for (const auto& packet_feedback : packet_feedback_vector) {
delayed_feedback = false;
// feed in each feedback entry
IncomingPacketFeedback(packet_feedback, msg.feedback_time);
if (prev_detector_state == BandwidthUsage::kBwUnderusing &&
active_delay_detector_->State() == BandwidthUsage::kBwNormal) {
// transitioning from underusing back to normal counts as recovering from overuse
recovered_from_overuse = true;
}
prev_detector_state = active_delay_detector_->State();
}
if (delayed_feedback) {
// TODO(bugs.webrtc.org/10125): Design a better mechanism to safe-guard
// against building very large network queues.
return Result();
}
rate_control_.SetInApplicationLimitedRegion(in_alr);
rate_control_.SetNetworkStateEstimate(network_estimate);
return MaybeUpdateEstimate(acked_bitrate, probe_bitrate,
std::move(network_estimate),
recovered_from_overuse, in_alr, msg.feedback_time);
}
DelayBasedBwe::IncomingPacketFeedback
/**
packet_feedback: feedback for one packet: receive time, send time, and packet size
at_time: local timestamp at which the feedback was received
*/
void DelayBasedBwe::IncomingPacketFeedback(const PacketResult& packet_feedback,
Timestamp at_time) {
// Reset if the stream has timed out.
// a stream is considered timed out 2 s (kStreamTimeOut) after the last feedback
if (last_seen_packet_.IsInfinite() ||
at_time - last_seen_packet_ > kStreamTimeOut) {
video_inter_arrival_.reset(
new InterArrival(kTimestampGroupTicks, kTimestampToMs, true));
video_delay_detector_.reset(
new TrendlineEstimator(key_value_config_, network_state_predictor_));
audio_inter_arrival_.reset(
new InterArrival(kTimestampGroupTicks, kTimestampToMs, true));
audio_delay_detector_.reset(
new TrendlineEstimator(key_value_config_, network_state_predictor_));
active_delay_detector_ = video_delay_detector_.get();
}
last_seen_packet_ = at_time;
// Ignore "small" packets if many/most packets in the call are "large". The
// packet size may have a significant effect on the propagation delay,
// especially at low bandwidths. Variations in packet size will then show up
// as noise in the delay measurement. By default, we include all packets.
// packet size
DataSize packet_size = packet_feedback.sent_packet.size;
if (!ignore_small_.small_threshold.IsZero()) {
double is_large =
static_cast(packet_size >= ignore_small_.large_threshold);
fraction_large_packets_ +=
ignore_small_.smoothing_factor * (is_large - fraction_large_packets_);
if (packet_size <= ignore_small_.small_threshold &&
fraction_large_packets_ >= ignore_small_.fraction_large) {
return;
}
}
// As an alternative to ignoring small packets, we can separate audio and
// video packets for overuse detection.
InterArrival* inter_arrival_for_packet = video_inter_arrival_.get();
DelayIncreaseDetectorInterface* delay_detector_for_packet =
video_delay_detector_.get();
if (separate_audio_.enabled) {
if (packet_feedback.sent_packet.audio) {
inter_arrival_for_packet = audio_inter_arrival_.get();
delay_detector_for_packet = audio_delay_detector_.get();
audio_packets_since_last_video_++;
if (audio_packets_since_last_video_ > separate_audio_.packet_threshold &&
packet_feedback.receive_time - last_video_packet_recv_time_ >
separate_audio_.time_threshold) {
active_delay_detector_ = audio_delay_detector_.get();
}
} else {
audio_packets_since_last_video_ = 0;
last_video_packet_recv_time_ =
std::max(last_video_packet_recv_time_, packet_feedback.receive_time);
active_delay_detector_ = video_delay_detector_.get();
}
}
// Convert the send time into the timestamp format InterArrival works with (see the InterArrival analysis above).
uint32_t send_time_24bits =
static_cast<uint32_t>(
((static_cast<uint64_t>(packet_feedback.sent_packet.send_time.ms())
<< kAbsSendTimeFraction) +
500) /
1000) &
0x00FFFFFF;
// Shift up send time to use the full 32 bits that inter_arrival works with,
// so wrapping works properly.
uint32_t timestamp = send_time_24bits << kAbsSendTimeInterArrivalUpshift;
uint32_t timestamp_delta = 0;
int64_t recv_delta_ms = 0; // arrival-time delta vs. the previous packet group, ms
int size_delta = 0;
bool calculated_deltas = inter_arrival_for_packet->ComputeDeltas(
timestamp, packet_feedback.receive_time.ms(), at_time.ms(),
packet_size.bytes(), &timestamp_delta, &recv_delta_ms, &size_delta);
// send-time delta vs. the previous packet group, ms
double send_delta_ms = (1000.0 * timestamp_delta) / (1 << kInterArrivalShift);
delay_detector_for_packet->Update(recv_delta_ms, send_delta_ms,
packet_feedback.sent_packet.send_time.ms(), // send time of the current packet group, ms
packet_feedback.receive_time.ms(), // arrival time of the current packet group, ms
packet_size.bytes(), calculated_deltas);
}
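Checking the timestamp conversion with concrete numbers (kAbsSendTimeFraction = 18, kAbsSendTimeInterArrivalUpshift = 8, kInterArrivalShift = 18 + 8 = 26): a send time of 1 ms becomes ((1 << 18) + 500) / 1000 = 262 ticks in the 24-bit abs-send-time format, or 262 << 8 = 67072 after the upshift; converting a delta of that size back gives 1000.0 * 67072 / (1 << 26) ≈ 0.9995 ms, so the round trip through the fixed-point format loses well under 1%.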
DelayBasedBwe::MaybeUpdateEstimate
/**
acked_bitrate: current measured throughput
probe_bitrate: bitrate estimated from probing
state_estimate: null here
recovered_from_overuse: whether we just recovered from overuse
in_alr: false
at_time: local timestamp at which the feedback was received
*/
DelayBasedBwe::Result DelayBasedBwe::MaybeUpdateEstimate(
absl::optional<DataRate> acked_bitrate,
absl::optional<DataRate> probe_bitrate,
absl::optional<NetworkStateEstimate> state_estimate,
bool recovered_from_overuse,
bool in_alr,
Timestamp at_time) {
Result result;
// Currently overusing the bandwidth.
if (active_delay_detector_->State() == BandwidthUsage::kBwOverusing) {
if (has_once_detected_overuse_ && in_alr && alr_limited_backoff_enabled_) {
if (rate_control_.TimeToReduceFurther(at_time, prev_bitrate_)) {
result.updated =
UpdateEstimate(at_time, prev_bitrate_, &result.target_bitrate);
result.backoff_in_alr = true;
}
} else if (acked_bitrate &&
rate_control_.TimeToReduceFurther(at_time, *acked_bitrate)) {
result.updated =
UpdateEstimate(at_time, acked_bitrate, &result.target_bitrate);
} else if (!acked_bitrate && rate_control_.ValidEstimate() &&
rate_control_.InitialTimeToReduceFurther(at_time)) {
// Overusing before we have a measured acknowledged bitrate. Reduce send
// rate by 50% every 200 ms.
// TODO(tschumim): Improve this and/or the acknowledged bitrate estimator
// so that we (almost) always have a bitrate estimate.
rate_control_.SetEstimate(rate_control_.LatestEstimate() / 2, at_time);
result.updated = true;
result.probe = false;
result.target_bitrate = rate_control_.LatestEstimate();
}
has_once_detected_overuse_ = true;
} else {
// not currently overusing
if (probe_bitrate) {
result.probe = true;
result.updated = true;
result.target_bitrate = *probe_bitrate;
rate_control_.SetEstimate(*probe_bitrate, at_time);
} else {
result.updated =
UpdateEstimate(at_time, acked_bitrate, &result.target_bitrate);
result.recovered_from_overuse = recovered_from_overuse;
}
}
BandwidthUsage detector_state = active_delay_detector_->State();
if ((result.updated && prev_bitrate_ != result.target_bitrate) ||
detector_state != prev_state_) {
DataRate bitrate = result.updated ? result.target_bitrate : prev_bitrate_;
BWE_TEST_LOGGING_PLOT(1, "target_bitrate_bps", at_time.ms(), bitrate.bps());
if (event_log_) {
event_log_->Log(std::make_unique(
bitrate.bps(), detector_state));
}
prev_bitrate_ = bitrate;
prev_state_ = detector_state;
}
return result;
}
DelayBasedBwe::UpdateEstimate
/**
at_time: current time
acked_bitrate: measured (acknowledged) throughput
target_rate: output, the resulting target bitrate
returns whether the estimate was updated
*/
bool DelayBasedBwe::UpdateEstimate(Timestamp at_time,
absl::optional<DataRate> acked_bitrate,
DataRate* target_rate) {
const RateControlInput input(active_delay_detector_->State(), acked_bitrate);
// push the current network state into AimdRateControl
*target_rate = rate_control_.Update(&input, at_time);
return rate_control_.ValidEstimate();
}
AIMD stands for Additive Increase Multiplicative Decrease: increase the rate additively, decrease it multiplicatively.
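As a self-contained illustration of the AIMD idea (a toy sketch with made-up numbers, not WebRTC's actual classes; the fixed-capacity "detector" below stands in for the trendline estimator):
#include <cstdio>

// Generic AIMD loop: probe upward slowly while the network looks fine,
// back off sharply when overuse is detected. Produces the classic sawtooth.
int main() {
  const double kLinkCapacityBps = 600000.0;  // pretend link capacity
  const double kAdditiveStepBps = 40000.0;   // additive increase per interval
  const double kBeta = 0.85;                 // multiplicative decrease factor
  double rate_bps = 300000.0;
  for (int interval = 0; interval < 20; ++interval) {
    bool overusing = rate_bps > kLinkCapacityBps;  // stand-in for the detector
    if (overusing)
      rate_bps *= kBeta;             // multiplicative decrease
    else
      rate_bps += kAdditiveStepBps;  // additive increase
    std::printf("interval %2d: %.0f bps\n", interval, rate_bps);
  }
}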
AimdRateControl::TimeToReduceFurther
/**
Decides whether it is time to reduce the bitrate further.
*/
bool AimdRateControl::TimeToReduceFurther(Timestamp at_time,
DataRate estimated_throughput) const {
// the reduction interval is one RTT, clamped to [10, 200] ms
const TimeDelta bitrate_reduction_interval =
rtt_.Clamped(TimeDelta::Millis(10), TimeDelta::Millis(200));
if (at_time - time_last_bitrate_change_ >= bitrate_reduction_interval) {
return true;
}
if (ValidEstimate()) {
// TODO(terelius/holmer): Investigate consequences of increasing
// the threshold to 0.95 * LatestEstimate().
const DataRate threshold = 0.5 * LatestEstimate();
return estimated_throughput < threshold; // also reduce if throughput fell below half the latest estimate
}
return false;
}
AimdRateControl::Update
/**
input: the current network state and measured throughput
at_time: current time
*/
DataRate AimdRateControl::Update(const RateControlInput* input,
Timestamp at_time) {
RTC_CHECK(input);
// Set the initial bit rate value to what we're receiving the first half
// second.
// TODO(bugs.webrtc.org/9379): The comment above doesn't match to the code.
if (!bitrate_is_initialized_) {
// bitrate not initialized yet
const TimeDelta kInitializationTime = TimeDelta::Seconds(5);
RTC_DCHECK_LE(kBitrateWindowMs, kInitializationTime.ms());
if (time_first_throughput_estimate_.IsInfinite()) {
// record the time of the first throughput estimate
if (input->estimated_throughput)
time_first_throughput_estimate_ = at_time;
} else if (at_time - time_first_throughput_estimate_ >
kInitializationTime &&
input->estimated_throughput) {
// after 5 s, initialize the bitrate from the measured throughput
current_bitrate_ = *input->estimated_throughput;
bitrate_is_initialized_ = true;
}
}
ChangeBitrate(*input, at_time);
return current_bitrate_;
}
AimdRateControl::ChangeState
/**
State transition: map the detected network state to a rate-control state: Hold, Increase, or Decrease.
The controller maintains three states (Increase, Hold, Decrease) and transitions on the detector's
three signals (Normal, Overuse, Underuse):
When Overuse is detected, the state becomes Decrease no matter where we are.
When Underuse is detected, the state becomes Hold no matter where we are.
When Normal is detected, Hold transitions to Increase.
*/
void AimdRateControl::ChangeState(const RateControlInput& input,
Timestamp at_time) {
switch (input.bw_state) {
case BandwidthUsage::kBwNormal:
if (rate_control_state_ == kRcHold) {
time_last_bitrate_change_ = at_time;
rate_control_state_ = kRcIncrease;
}
break;
case BandwidthUsage::kBwOverusing:
if (rate_control_state_ != kRcDecrease) {
rate_control_state_ = kRcDecrease;
}
break;
case BandwidthUsage::kBwUnderusing:
rate_control_state_ = kRcHold;
break;
default:
assert(false);
}
}
AimdRateControl::ChangeBitrate
// Update the rate-control state from the current network state and adjust the target bitrate.
void AimdRateControl::ChangeBitrate(const RateControlInput& input,
Timestamp at_time) {
absl::optional<DataRate> new_bitrate;
DataRate estimated_throughput =
input.estimated_throughput.value_or(latest_estimated_throughput_);
if (input.estimated_throughput)
latest_estimated_throughput_ = *input.estimated_throughput;
// An over-use should always trigger us to reduce the bitrate, even though
// we have not yet established our first estimate. By acting on the over-use,
// we will end up with a valid estimate.
if (!bitrate_is_initialized_ &&
input.bw_state != BandwidthUsage::kBwOverusing)
return;
// state transition based on the detected network state
ChangeState(input, at_time);
// We limit the new bitrate based on the troughput to avoid unlimited bitrate
// increases. We allow a bit more lag at very low rates to not too easily get
// stuck if the encoder produces uneven outputs.
// cap the increase so the rate does not outrun the measured throughput: at most 1.5x throughput + 10 kbps
const DataRate troughput_based_limit =
1.5 * estimated_throughput + DataRate::KilobitsPerSec(10);
switch (rate_control_state_) {
case kRcHold:
// Hold: nothing to do
break;
case kRcIncrease:
// link_capacity_.UpperBound() is the average throughput plus 3 standard deviations;
// throughput above it means the capacity estimate is invalid, so reset it.
if (estimated_throughput > link_capacity_.UpperBound())
link_capacity_.Reset();
// Do not increase the delay based estimate in alr since the estimator
// will not be able to get transport feedback necessary to detect if
// the new estimate is correct.
// If we have previously increased above the limit (for instance due to
// probing), we don't allow further changes.
if (current_bitrate_ < troughput_based_limit &&
!(send_side_ && in_alr_ && no_bitrate_increase_in_alr_)) {
DataRate increased_bitrate = DataRate::MinusInfinity();
if (link_capacity_.has_estimate()) {
// The link_capacity estimate is reset if the measured throughput
// is too far from the estimate. We can therefore assume that our
// target rate is reasonably close to link capacity and use additive
// increase.
// If the measured throughput had drifted far from the capacity estimate, link_capacity_
// would have been reset above; so the target rate is presumably close to the link
// capacity, and we increase cautiously, additively.
DataRate additive_increase =
AdditiveRateIncrease(at_time, time_last_bitrate_change_);
increased_bitrate = current_bitrate_ + additive_increase;
} else {
// If we don't have an estimate of the link capacity, use faster ramp
// up to discover the capacity.
// no capacity estimate yet: ramp up multiplicatively to discover the capacity faster
DataRate multiplicative_increase = MultiplicativeRateIncrease(
at_time, time_last_bitrate_change_, current_bitrate_);
increased_bitrate = current_bitrate_ + multiplicative_increase;
}
new_bitrate = std::min(increased_bitrate, troughput_based_limit);
}
time_last_bitrate_change_ = at_time;
break;
case kRcDecrease: {
DataRate decreased_bitrate = DataRate::PlusInfinity();
// Set bit rate to something slightly lower than the measured throughput
// to get rid of any self-induced delay.
// beta_ = 0.85
decreased_bitrate = estimated_throughput * beta_;
if (decreased_bitrate > current_bitrate_ && !link_capacity_fix_) {
// TODO(terelius): The link_capacity estimate may be based on old
// throughput measurements. Relying on them may lead to unnecessary
// BWE drops.
if (link_capacity_.has_estimate()) {
// use 85% of the estimated link capacity
decreased_bitrate = beta_ * link_capacity_.estimate();
}
}
if (estimate_bounded_backoff_ && network_estimate_) {
decreased_bitrate = std::max(
decreased_bitrate, network_estimate_->link_capacity_lower * beta_);
}
// Avoid increasing the rate when over-using.
// take the decreased rate only if it is below the current rate
if (decreased_bitrate < current_bitrate_) {
new_bitrate = decreased_bitrate;
}
if (bitrate_is_initialized_ && estimated_throughput < current_bitrate_) {
if (!new_bitrate.has_value()) {
last_decrease_ = DataRate::Zero();
} else {
last_decrease_ = current_bitrate_ - *new_bitrate;
}
}
if (estimated_throughput < link_capacity_.LowerBound()) {
// The current throughput is far from the estimated link capacity. Clear
// the estimate to allow an immediate update in OnOveruseDetected.
link_capacity_.Reset();
}
bitrate_is_initialized_ = true;
// record the measured throughput in the capacity estimator;
// link_capacity_ is updated only here, in the kRcDecrease state
link_capacity_.OnOveruseDetected(estimated_throughput);
// Stay on hold until the pipes are cleared.
// switch to Hold
rate_control_state_ = kRcHold;
time_last_bitrate_change_ = at_time;
time_last_bitrate_decrease_ = at_time;
break;
}
default:
assert(false);
}
// commit the new target bitrate
current_bitrate_ = ClampBitrate(new_bitrate.value_or(current_bitrate_));
}
AimdRateControl::MultiplicativeRateIncrease
Multiplicative increase: the growth factor is scaled by the time elapsed since the last change.
DataRate AimdRateControl::MultiplicativeRateIncrease(
Timestamp at_time,
Timestamp last_time,
DataRate current_bitrate) const {
double alpha = 1.08;
if (last_time.IsFinite()) {
auto time_since_last_update = at_time - last_time;
// exponent: elapsed time in seconds, capped at 1.0; base: 1.08
alpha = pow(alpha, std::min(time_since_last_update.seconds<double>(), 1.0));
}
DataRate multiplicative_increase =
std::max(current_bitrate * (alpha - 1.0), DataRate::BitsPerSec(1000));
return multiplicative_increase;
}
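For example, 500 ms after the last rate change, alpha = 1.08^0.5 ≈ 1.039, so a current bitrate of 1 Mbps grows by max(1,000,000 * 0.039, 1000) ≈ 39 kbps; after a full second the exponent saturates at 1.0, so the rate grows by at most 8% per update.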
AimdRateControl::AdditiveRateIncrease
Additive increase: the amount is scaled by the time elapsed since the last change.
/**
Near-maximum rate increase per second: roughly one average packet per response time (RTT + 100 ms).
*/
double AimdRateControl::GetNearMaxIncreaseRateBpsPerSecond() const {
RTC_DCHECK(!current_bitrate_.IsZero());
// assume 30 fps and packets bounded by the MTU (~1200 bytes); use these to estimate packets per frame and the average packet size
const TimeDelta kFrameInterval = TimeDelta::Seconds(1) / 30;
DataSize frame_size = current_bitrate_ * kFrameInterval;
const DataSize kPacketSize = DataSize::Bytes(1200);
// packets per frame
double packets_per_frame = std::ceil(frame_size / kPacketSize);
// average packet size
DataSize avg_packet_size = frame_size / packets_per_frame;
// Approximate the over-use estimator delay to 100 ms.
TimeDelta response_time = rtt_ + TimeDelta::Millis(100);
if (in_experiment_)
response_time = response_time * 2;
double increase_rate_bps_per_second =
(avg_packet_size / response_time).bps();
double kMinIncreaseRateBpsPerSecond = 4000;
return std::max(kMinIncreaseRateBpsPerSecond, increase_rate_bps_per_second);
}
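A worked example: at current_bitrate_ = 1.2 Mbps and 30 fps, frame_size = 1,200,000 / 30 / 8 = 5000 bytes, packets_per_frame = ceil(5000 / 1200) = 5, and avg_packet_size = 1000 bytes; with rtt = 100 ms the response_time is 200 ms, so the near-max increase rate is 1000 bytes / 0.2 s = 40,000 bps per second, well above the 4000 bps floor.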
// additive increase: scaled by the elapsed time
DataRate AimdRateControl::AdditiveRateIncrease(Timestamp at_time,
Timestamp last_time) const {
double time_period_seconds = (at_time - last_time).seconds();
// increase = near-max rate per second (GetNearMaxIncreaseRateBpsPerSecond) x elapsed time
double data_rate_increase_bps =
GetNearMaxIncreaseRateBpsPerSecond() * time_period_seconds;
return DataRate::BitsPerSec(data_rate_increase_bps);
}