mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-05 01:54:09 +08:00
85f7e7508a
congestion control algorithms that do not halve cwnd in their .ssthresh should provide a .cwnd_undo rather than rely on current fallback which assumes reno halving (and thus doubles the cwnd). All of these do 'something else' in their .ssthresh implementation, thus store the cwnd on loss and provide .undo_cwnd to restore it again. A followup patch will remove the fallback and all algorithms will need to provide a .cwnd_undo function. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: David S. Miller <davem@davemloft.net>
78 lines
1.7 KiB
C
78 lines
1.7 KiB
C
/* Tom Kelly's Scalable TCP
 *
 * See http://www.deneholme.net/tom/scalable/
 *
 * John Heffner <jheffner@sc.edu>
 */
|
|
|
|
#include <linux/module.h>
|
|
#include <net/tcp.h>
|
|
|
|
/* These factors derived from the recommended values in the paper:
 * .01 and 7/8. We use 50 instead of 100 to account for
 * delayed ack.
 */
|
|
#define TCP_SCALABLE_AI_CNT 50U
|
|
#define TCP_SCALABLE_MD_SCALE 3
|
|
|
|
/* Per-socket private state, stored in the congestion-control area
 * returned by inet_csk_ca(sk).
 */
struct scalable {
	u32 loss_cwnd;	/* snd_cwnd saved at loss time, for .undo_cwnd */
};
|
|
|
|
/* Additive-increase step of Scalable TCP.
 *
 * In slow start we behave like standard TCP.  Past ssthresh, cwnd grows
 * by one segment per min(cwnd, TCP_SCALABLE_AI_CNT) ACKed segments,
 * which caps the increase at the ~1% per-ACK rate recommended by the
 * paper (the cap of 50 rather than 100 compensates for delayed ACKs).
 */
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Do not grow cwnd unless it is actually the limiting factor. */
	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		tcp_slow_start(tp, acked);
		return;
	}

	tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), 1);
}
|
|
|
|
static u32 tcp_scalable_ssthresh(struct sock *sk)
|
|
{
|
|
const struct tcp_sock *tp = tcp_sk(sk);
|
|
struct scalable *ca = inet_csk_ca(sk);
|
|
|
|
ca->loss_cwnd = tp->snd_cwnd;
|
|
|
|
return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
|
|
}
|
|
|
|
static u32 tcp_scalable_cwnd_undo(struct sock *sk)
|
|
{
|
|
const struct scalable *ca = inet_csk_ca(sk);
|
|
|
|
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
|
|
}
|
|
|
|
/* Hook table handed to the TCP congestion-control core; selectable at
 * runtime under the name "scalable".
 */
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh = tcp_scalable_ssthresh,	/* 7/8 decrease on loss */
	.undo_cwnd = tcp_scalable_cwnd_undo,	/* restore saved cwnd */
	.cong_avoid = tcp_scalable_cong_avoid,

	.owner = THIS_MODULE,
	.name = "scalable",
};
|
|
|
|
/* Module init: register "scalable" with the congestion-control
 * framework.  A non-zero return aborts module load.
 */
static int __init tcp_scalable_register(void)
{
	return tcp_register_congestion_control(&tcp_scalable);
}
|
|
|
|
/* Module exit: remove "scalable" from the list of available
 * congestion-control algorithms.
 */
static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}
|
|
|
|
module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

/* Module metadata, reported by modinfo. */
MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
|