diff -urN linux-2.6.32.7/include/linux/sysctl.h linux-2.6.32.7_pacing/include/linux/sysctl.h
--- linux-2.6.32.7/include/linux/sysctl.h 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/include/linux/sysctl.h 2016-11-19 00:04:20.108999867 -0800
@@ -426,14 +426,15 @@
NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
NET_TCP_DMA_COPYBREAK=116,
NET_TCP_SLOW_START_AFTER_IDLE=117,
- NET_CIPSOV4_CACHE_ENABLE=118,
- NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
- NET_CIPSOV4_RBM_OPTFMT=120,
- NET_CIPSOV4_RBM_STRICTVALID=121,
- NET_TCP_AVAIL_CONG_CONTROL=122,
- NET_TCP_ALLOWED_CONG_CONTROL=123,
- NET_TCP_MAX_SSTHRESH=124,
- NET_TCP_FRTO_RESPONSE=125,
+ NET_TCP_PACING=118,
+ NET_CIPSOV4_CACHE_ENABLE=119,
+ NET_CIPSOV4_CACHE_BUCKET_SIZE=120,
+ NET_CIPSOV4_RBM_OPTFMT=121,
+ NET_CIPSOV4_RBM_STRICTVALID=122,
+ NET_TCP_AVAIL_CONG_CONTROL=123,
+ NET_TCP_ALLOWED_CONG_CONTROL=124,
+ NET_TCP_MAX_SSTHRESH=125,
+ NET_TCP_FRTO_RESPONSE=126,
};
enum {
diff -urN linux-2.6.32.7/include/linux/tcp.h linux-2.6.32.7_pacing/include/linux/tcp.h
--- linux-2.6.32.7/include/linux/tcp.h 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/include/linux/tcp.h 2016-11-19 00:05:25.876998887 -0800
@@ -398,6 +398,17 @@
u32 probe_seq_start;
u32 probe_seq_end;
} mtu_probe;
+
+#ifdef CONFIG_TCP_PACING
+/* TCP Pacing structure */
+ struct {
+ struct timer_list timer;
+ __u16 count;
+ __u16 burst;
+ __u8 lock;
+ __u8 delta;
+ } pacing;
+#endif
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
diff -urN linux-2.6.32.7/include/net/tcp.h linux-2.6.32.7_pacing/include/net/tcp.h
--- linux-2.6.32.7/include/net/tcp.h 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/include/net/tcp.h 2016-11-19 00:06:53.853999772 -0800
@@ -236,6 +236,9 @@
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
+#ifdef CONFIG_TCP_PACING
+extern int sysctl_tcp_pacing;
+#endif
extern int sysctl_tcp_max_ssthresh;
extern atomic_t tcp_memory_allocated;
@@ -498,6 +501,11 @@
extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
extern unsigned int tcp_current_mss(struct sock *sk);
+#ifdef CONFIG_TCP_PACING
+extern void tcp_pacing_recalc_delta(struct sock *sk);
+extern void tcp_pacing_reset_timer(struct sock *sk);
+#endif
+
/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
diff -urN linux-2.6.32.7/net/ipv4/Kconfig linux-2.6.32.7_pacing/net/ipv4/Kconfig
--- linux-2.6.32.7/net/ipv4/Kconfig 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/net/ipv4/Kconfig 2016-11-19 00:07:48.466000097 -0800
@@ -543,6 +543,21 @@
loss packets.
See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf
+config TCP_PACING
+ bool "TCP Pacing"
+ depends on EXPERIMENTAL
+ select HZ_1000
+ default n
+ ---help---
+ Many researchers have observed that TCP's congestion control mechanisms
+ can lead to bursty traffic flows on modern high-speed networks, with a
+ negative impact on overall network efficiency. A proposed solution to this
+ problem is to evenly space, or "pace", data sent into the network over an
+ entire round-trip time, so that data is not sent in a burst.
+ To enable this feature, please refer to Documentation/networking/ip-sysctl.txt.
+ If unsure, say N.
+
+
config TCP_CONG_YEAH
tristate "YeAH TCP"
depends on EXPERIMENTAL
diff -urN linux-2.6.32.7/net/ipv4/sysctl_net_ipv4.c linux-2.6.32.7_pacing/net/ipv4/sysctl_net_ipv4.c
--- linux-2.6.32.7/net/ipv4/sysctl_net_ipv4.c 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/net/ipv4/sysctl_net_ipv4.c 2016-11-19 00:09:19.760999390 -0800
@@ -742,6 +742,16 @@
.strategy = sysctl_intvec,
.extra1 = &zero
},
+#ifdef CONFIG_TCP_PACING
+ {
+ .ctl_name = NET_TCP_PACING,
+ .procname = "tcp_pacing",
+ .data = &sysctl_tcp_pacing,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+#endif
{ .ctl_name = 0 }
};
diff -urN linux-2.6.32.7/net/ipv4/tcp_input.c linux-2.6.32.7_pacing/net/ipv4/tcp_input.c
--- linux-2.6.32.7/net/ipv4/tcp_input.c 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/net/ipv4/tcp_input.c 2016-11-19 00:09:55.244999661 -0800
@@ -3663,6 +3663,10 @@
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
}
+#ifdef CONFIG_TCP_PACING
+ if(sysctl_tcp_pacing)
+ tcp_pacing_recalc_delta(sk);
+#endif
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
dst_confirm(sk->sk_dst_cache);
diff -urN linux-2.6.32.7/net/ipv4/tcp_output.c linux-2.6.32.7_pacing/net/ipv4/tcp_output.c
--- linux-2.6.32.7/net/ipv4/tcp_output.c 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/net/ipv4/tcp_output.c 2016-11-19 00:26:03.760999278 -0800
@@ -59,6 +59,10 @@
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
+#ifdef CONFIG_TCP_PACING
+int sysctl_tcp_pacing=0;
+#endif
+
/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
@@ -654,6 +658,12 @@
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
+#ifdef CONFIG_TCP_PACING
+ if(sysctl_tcp_pacing) {
+ tcp_pacing_reset_timer(sk);
+ tp->pacing.lock = 1;
+ }
+#endif
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
@@ -1342,6 +1352,14 @@
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
+#ifdef CONFIG_TCP_PACING
+ /* TCP Pacing conflicts with this algorithm.
+ * When Pacing is enabled, don't try to defer.
+ */
+ if(sysctl_tcp_pacing)
+ goto send_now;
+#endif
+
if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
goto send_now;
@@ -1577,6 +1595,11 @@
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
break;
+#ifdef CONFIG_TCP_PACING
+ if (sysctl_tcp_pacing && tp->pacing.lock)
+ return 0;
+#endif
+
if (tso_segs == 1) {
if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
(tcp_skb_is_last(sk, skb) ?
@@ -1588,9 +1611,14 @@
}
limit = mss_now;
- if (tso_segs > 1 && !tcp_urg_mode(tp))
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
cwnd_quota);
+#ifdef CONFIG_TCP_PACING
+ if (sysctl_tcp_pacing && sent_pkts >= tp->pacing.burst)
+ tp->pacing.lock=1;
+#endif
+
if (skb->len > limit &&
unlikely(tso_fragment(sk, skb, limit, mss_now)))
@@ -1951,6 +1979,11 @@
}
}
+#ifdef CONFIG_TCP_PACING
+ if (sysctl_tcp_pacing && tp->pacing.lock)
+ return -EAGAIN;
+#endif
+
/* Make a copy, if the first transmission SKB clone we made
* is still in somebody's hands, else make a clone.
*/
diff -urN linux-2.6.32.7/net/ipv4/tcp_timer.c linux-2.6.32.7_pacing/net/ipv4/tcp_timer.c
--- linux-2.6.32.7/net/ipv4/tcp_timer.c 2010-01-28 15:06:20.000000000 -0800
+++ linux-2.6.32.7_pacing/net/ipv4/tcp_timer.c 2016-11-19 00:39:38.195998392 -0800
@@ -34,10 +34,19 @@
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer (unsigned long data);
+#ifdef CONFIG_TCP_PACING
+static void tcp_pacing_timer(unsigned long data);
+#endif
+
void tcp_init_xmit_timers(struct sock *sk)
{
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
&tcp_keepalive_timer);
+#ifdef CONFIG_TCP_PACING
+ init_timer(&(tcp_sk(sk)->pacing.timer));
+ tcp_sk(sk)->pacing.timer.function=&tcp_pacing_timer;
+ tcp_sk(sk)->pacing.timer.data = (unsigned long) sk;
+#endif
}
EXPORT_SYMBOL(tcp_init_xmit_timers);
@@ -535,3 +544,112 @@
bh_unlock_sock(sk);
sock_put(sk);
}
+
+#ifdef CONFIG_TCP_PACING
+/*
+ * Timer used to spread ("pace") packet transmissions.
+ * The expiry interval is pacing.delta, computed from srtt/cwnd in
+ * tcp_pacing_recalc_delta(); the timer is restarted each time a
+ * segment is sent out (see tcp_pacing_reset_timer()).
+ */
+static void tcp_pacing_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!sysctl_tcp_pacing) {
+		/* Pacing disabled after arming: release the reference
+		 * sock_hold() took; a bare return would leak it. */
+		sock_put(sk);
+		return;
+	}
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
+		/* Socket busy: try again on the next tick. */
+		if (!mod_timer(&tp->pacing.timer, jiffies + 1))
+			sock_hold(sk);
+		goto out_unlock;
+	}
+
+	if (sk->sk_state == TCP_CLOSE)
+		goto out;
+
+	/* Re-enable sending so the next ACK (or the push below) can
+	 * transmit; if the queue is empty there is nothing to do.
+	 */
+	tp->pacing.lock = 0;
+
+	if (!sk->sk_send_head)
+		goto out;	/* sending queue empty */
+
+	tcp_push_pending_frames(sk);
+
+ out:
+	if (tcp_memory_pressure)
+		sk_mem_reclaim(sk);
+
+ out_unlock:
+	bh_unlock_sock(sk);
+	sock_put(sk);
+}
+
+void tcp_pacing_reset_timer(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (!sysctl_tcp_pacing)
+		return;
+	/* Hold the socket while the timer is pending (mod_timer
+	 * returns 0 when the timer was not already armed). */
+	if (!mod_timer(&tp->pacing.timer, jiffies + tp->pacing.delta))
+		sock_hold(sk);
+}
+EXPORT_SYMBOL(tcp_pacing_reset_timer);
+
+/*
+ * Recompute pacing.delta (jiffies between transmissions) and
+ * pacing.burst from the current window and smoothed RTT,
+ * using a simplified uniform pacing policy.
+ */
+void tcp_pacing_recalc_delta(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	__u32 window = tp->snd_cwnd << 3;
+	__u32 srtt = tp->srtt;
+	__u32 carry = 0;
+	__u32 cur_mss = tp->mss_cache;
+	int ca_state = inet_csk(sk)->icsk_ca_state;
+
+	if (ca_state == TCP_CA_Recovery && tp->snd_cwnd < tp->snd_ssthresh)
+		window = tp->snd_ssthresh << 3;
+
+	if (tp->snd_wnd / cur_mss < tp->snd_cwnd)
+		window = (tp->snd_wnd / cur_mss) << 3;
+
+	if (window > 1 && srtt) {
+		if (window <= srtt) {
+			/* One segment every delta jiffies. */
+			tp->pacing.delta = srtt / window;
+			if (srtt % window)
+				carry = (srtt / (srtt % window)) / tp->pacing.delta;
+			if (tp->pacing.count >= carry - 1 && carry > 1) {
+				tp->pacing.delta++;
+				tp->pacing.count = 0;
+			}
+			tp->pacing.burst = 1;
+		} else {
+			/* A burst of window/srtt segments every jiffy. */
+			tp->pacing.delta = 1;
+			tp->pacing.burst = window / srtt;
+			if (window % srtt)
+				carry = (window / (window % srtt)) * tp->pacing.burst;
+			if (tp->pacing.count >= carry - 1 && carry > 1) {
+				tp->pacing.burst++;
+				tp->pacing.count = 0;
+			}
+		}
+	} else {
+		tp->pacing.delta = 0;	/* window or rtt unusable: no pacing */
+		tp->pacing.burst = 1;
+	}
+}
+
+EXPORT_SYMBOL(tcp_pacing_recalc_delta);
+
+#endif
如果你已经读了CUBIC的Paper,干嘛为代码中的cube_factor以及cube_rtt_scale而纠结,我告诉温州老板这只是控制曲线形状的参数,就是方程式中的C,K之类的,老板就开始纠结代码中的计算问题了。...其实Linux内核的代码只是一种写法而已,给你一个数学公式,相信你也可以正确实现它,然后别人会纠结你为什么这么实现,这其实只是你的一种方法而已,没有为什么。这不是一种正确的学习方法,这会让人抓不到重点。
-----------------------------------------