tcp: add ece_ack flag to reno sack functions
Pass a boolean flag that tells the ECE state of the current ack to the reno
sack functions. This is a pure refactor for future patches to improve
tracking of delivered counts.

Signed-off-by: Yousuk Seung <ysseung@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit c634e34f6e
parent fdb7eb21dd
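For readers outside the kernel tree, the sketch below is a minimal, standalone illustration (not kernel code) of the call pattern this patch introduces: the ACK processing path computes the ECE state once from the ACK flag word (flag & FLAG_ECE) and threads it as a boolean into the Reno SACK helpers. The fake_sock struct, the FLAG_ECE bit value, and the delivered_ce bookkeeping are simplified stand-ins invented for illustration; in this patch itself the new argument is plumbed through but not yet consumed.

/* Standalone sketch of passing an ece_ack boolean into Reno SACK helpers. */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_ECE 0x40  /* illustrative bit, not the kernel's exact value */

struct fake_sock {
	int sacked_out;    /* emulated SACK count for a SACK-less connection */
	int delivered_ce;  /* hypothetical counter a future patch could update */
};

/* Emulate SACKs for a SACK-less connection: account for new dupacks. */
static void tcp_add_reno_sack_sketch(struct fake_sock *sk, int num_dupack, bool ece_ack)
{
	sk->sacked_out += num_dupack;
	if (ece_ack)
		sk->delivered_ce += num_dupack;  /* illustrative future use only */
}

/* Remove emulated SACKs once the covered data is cumulatively ACKed. */
static void tcp_remove_reno_sacks_sketch(struct fake_sock *sk, int acked, bool ece_ack)
{
	(void)ece_ack;  /* pure refactor: flag is threaded through, unused for now */
	if (acked >= sk->sacked_out)
		sk->sacked_out = 0;
	else
		sk->sacked_out -= acked;
}

int main(void)
{
	struct fake_sock sk = { 0, 0 };
	int flag = FLAG_ECE;  /* pretend the current ACK carried ECE */

	/* The caller derives the boolean once and passes it down. */
	tcp_add_reno_sack_sketch(&sk, 2, flag & FLAG_ECE);
	tcp_remove_reno_sacks_sketch(&sk, 1, flag & FLAG_ECE);
	printf("sacked_out=%d delivered_ce=%d\n", sk.sacked_out, sk.delivered_ce);
	return 0;
}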
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
@@ -1893,7 +1893,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
 {
 	if (num_dupack) {
 		struct tcp_sock *tp = tcp_sk(sk);
@@ -1911,7 +1911,7 @@ static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2697,7 +2697,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
 		 * delivered. Lower inflight to clock out (re)tranmissions.
 		 */
 		if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
-			tcp_add_reno_sack(sk, num_dupack);
+			tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
 		else if (flag & FLAG_SND_UNA_ADVANCED)
 			tcp_reset_reno_sack(tp);
 	}
@@ -2779,6 +2779,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fast_rexmit = 0, flag = *ack_flag;
+	bool ece_ack = flag & FLAG_ECE;
 	bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
 				      tcp_force_fast_retransmit(sk));
 
@@ -2787,7 +2788,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 	/* Now state machine starts.
 	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
-	if (flag & FLAG_ECE)
+	if (ece_ack)
 		tp->prior_ssthresh = 0;
 
 	/* B. In all the states check for reneging SACKs. */
@@ -2828,7 +2829,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (tcp_is_reno(tp))
-				tcp_add_reno_sack(sk, num_dupack);
+				tcp_add_reno_sack(sk, num_dupack, ece_ack);
 		} else {
 			if (tcp_try_undo_partial(sk, prior_snd_una))
 				return;
@@ -2853,7 +2854,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
-			tcp_add_reno_sack(sk, num_dupack);
+			tcp_add_reno_sack(sk, num_dupack, ece_ack);
 		}
 
 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -2877,7 +2878,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		}
 
 		/* Otherwise enter Recovery state */
-		tcp_enter_recovery(sk, (flag & FLAG_ECE));
+		tcp_enter_recovery(sk, ece_ack);
 		fast_rexmit = 1;
 	}
 
@@ -3053,7 +3054,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			       u32 prior_snd_una,
-			       struct tcp_sacktag_state *sack)
+			       struct tcp_sacktag_state *sack, bool ece_ack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u64 first_ackt, last_ackt;
@@ -3191,7 +3192,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 	}
 
 	if (tcp_is_reno(tp)) {
-		tcp_remove_reno_sacks(sk, pkts_acked);
+		tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
 
 		/* If any of the cumulatively ACKed segments was
 		 * retransmitted, non-SACK case cannot confirm that
@@ -3685,7 +3686,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		goto no_queue;
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
+	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state,
+				    flag & FLAG_ECE);
 
 	tcp_rack_update_reo_wnd(sk, &rs);
 