mirror of https://github.com/FEX-Emu/linux.git — synced 2024-12-22 17:33:01 +00:00
9f9843a751
Slow start now increases cwnd by 1 if an ACK acknowledges some packets, regardless of the number of packets acknowledged. Consequently, slow start performance is highly dependent on the degree of the stretch ACKs caused by receiver or network ACK compression mechanisms (e.g., delayed ACK, GRO, etc.). But the slow start algorithm is meant to send twice the amount of packets of the previous round, so it should process a stretch ACK of degree N as if it were N ACKs of degree 1, then exit when cwnd exceeds ssthresh. A follow-up patch will use the remainder of N (if greater than 1) to adjust cwnd in the congestion avoidance phase.

In addition, this patch retires the experimental limited slow start (LSS) feature. LSS has multiple drawbacks but questionable benefit. The fractional cwnd increase in LSS requires a loop in slow start even though it's rarely used. Configuring such an increase step via a global sysctl for different BDPs seems hard. Finally, and most importantly, the slow start overshoot concern is now better covered by Hybrid slow start (hystart), which is enabled by default.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
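For illustration only, here is a minimal user-space sketch of the slow-start behaviour the commit message describes: a stretch ACK covering N packets is processed as N ACKs of degree 1, growth stops once cwnd passes ssthresh, and the unused remainder of N is returned so congestion avoidance could consume it. This is not the kernel code; the struct and function names (cc_state, slow_start_sketch) are invented for the example.

#include <stdio.h>

/* Hypothetical, simplified connection state for the sketch. */
struct cc_state {
        unsigned int snd_cwnd;      /* congestion window, in packets */
        unsigned int snd_ssthresh;  /* slow-start threshold, in packets */
};

/* Process a (possibly stretch) ACK that newly acknowledges 'acked' packets
 * during slow start: grow cwnd by one per acknowledged packet, but stop once
 * cwnd exceeds ssthresh. Returns the leftover credit that congestion
 * avoidance could then use.
 */
static unsigned int slow_start_sketch(struct cc_state *tp, unsigned int acked)
{
        unsigned int cwnd = tp->snd_cwnd + acked;

        if (cwnd > tp->snd_ssthresh)
                cwnd = tp->snd_ssthresh + 1;
        acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = cwnd;
        return acked;
}

int main(void)
{
        struct cc_state tp = { .snd_cwnd = 10, .snd_ssthresh = 12 };
        /* A stretch ACK for 5 packets: cwnd grows 10 -> 13 (ssthresh + 1),
         * and 2 of the 5 acknowledged packets are left over for
         * congestion avoidance.
         */
        unsigned int rest = slow_start_sketch(&tp, 5);

        printf("cwnd=%u leftover=%u\n", tp.snd_cwnd, rest);
        return 0;
}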
64 lines · 1.4 KiB · C
/* Tom Kelly's Scalable TCP
 *
 * See http://www.deneholme.net/tom/scalable/
 *
 * John Heffner <jheffner@sc.edu>
 */

#include <linux/module.h>
#include <net/tcp.h>

/* These factors derived from the recommended values in the paper:
 * .01 and 7/8. We use 50 instead of 100 to account for
 * delayed ack.
 */
#define TCP_SCALABLE_AI_CNT	50U
#define TCP_SCALABLE_MD_SCALE	3

static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
				    u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);
	else
		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
}

static u32 tcp_scalable_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd - (tp->snd_cwnd >> TCP_SCALABLE_MD_SCALE), 2U);
}

static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh	= tcp_scalable_ssthresh,
	.cong_avoid	= tcp_scalable_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,

	.owner		= THIS_MODULE,
	.name		= "scalable",
};

static int __init tcp_scalable_register(void)
{
	return tcp_register_congestion_control(&tcp_scalable);
}

static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}

module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
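As a rough, non-authoritative illustration of what the two constants above mean: in congestion avoidance the window grows by one packet per min(cwnd, 50) ACKed packets (about cwnd/50, i.e. roughly 2% per RTT once cwnd exceeds 50, which is the paper's 0.01 per ACK halved to account for delayed ACKs), and on loss the window shrinks by 1/8 (cwnd - cwnd>>3), with a floor of 2 packets. The small standalone program below replays just those two rules with illustrative numbers; it is a simplified sketch, not the kernel path (the real module returns a new ssthresh from the .ssthresh hook rather than adjusting cwnd directly), and the names scalable_sketch, on_ack, and on_loss are invented here.

#include <stdio.h>

#define AI_CNT   50U  /* additive-increase period, mirrors TCP_SCALABLE_AI_CNT */
#define MD_SCALE 3    /* 1/8 multiplicative decrease, mirrors TCP_SCALABLE_MD_SCALE */

/* Hypothetical, simplified state: cwnd plus the ACK counter the kernel
 * keeps in tp->snd_cwnd_cnt for tcp_cong_avoid_ai().
 */
struct scalable_sketch {
        unsigned int cwnd;
        unsigned int cwnd_cnt;
};

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

/* Congestion avoidance: add one packet to cwnd every min(cwnd, AI_CNT) ACKs. */
static void on_ack(struct scalable_sketch *s)
{
        unsigned int w = min_u(s->cwnd, AI_CNT);

        if (++s->cwnd_cnt >= w) {
                s->cwnd_cnt = 0;
                s->cwnd++;
        }
}

/* Loss response: shrink cwnd by 1/8, never below 2 packets. */
static void on_loss(struct scalable_sketch *s)
{
        s->cwnd = max_u(s->cwnd - (s->cwnd >> MD_SCALE), 2U);
}

int main(void)
{
        struct scalable_sketch s = { .cwnd = 100, .cwnd_cnt = 0 };
        unsigned int i;

        for (i = 0; i < 100; i++)   /* roughly one RTT's worth of ACKs at cwnd=100 */
                on_ack(&s);
        printf("after ~1 RTT of ACKs: cwnd=%u\n", s.cwnd);  /* 102 */

        on_loss(&s);
        printf("after one loss:       cwnd=%u\n", s.cwnd);  /* 102 - 12 = 90 */
        return 0;
}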