author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2007-08-21 21:17:03 -0700
committer	Greg Kroah-Hartman <gregkh@suse.de>	2007-08-30 23:01:51 -0700
commit	783366ad4b212cde069c50903494eb6a6b83958c (patch)
tree	533c98ecbee00ae462deabe5320ce323d7b37725 /net
parent	e061467dc5b4c78d4fe61ea6d7a0c848e22882d7 (diff)
TCP: Fix TCP rate-halving on bidirectional flows.
Actually, the rate-halving seems to work too well: cwnd is reduced on every second ACK even though the number of packets in flight remains unchanged. Recoveries in bidirectional flows suffer quite badly because of this; both NewReno and SACK are affected.

After this patch, rate halving is performed for an ACK only if the number of packets in flight has presumably changed as well.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
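For illustration, here is a minimal user-space sketch of the guarded rate-halving step described above. It is not the kernel source: mini_tp, cwnd_down(), cwnd_min() and the two flag values are simplified stand-ins for tcp_sock, tcp_cwnd_down(), tcp_cwnd_min() and the kernel's FLAG_FORWARD_PROGRESS / FLAG_NOT_DUP ACK flags, chosen only to show why ACKs that make no forward progress no longer shrink cwnd.

/*
 * Minimal sketch (assumptions noted above, not the kernel code) of the
 * guarded rate-halving step: cwnd is decremented on every second ACK,
 * but only for ACKs that plausibly changed the packets-in-flight count.
 */
#include <stdio.h>

#define FLAG_NOT_DUP		0x01	/* roughly: ACK was not a pure duplicate */
#define FLAG_FORWARD_PROGRESS	0x02	/* roughly: ACK moved snd_una forward */

struct mini_tp {
	unsigned int snd_cwnd;		/* congestion window, in packets */
	unsigned int snd_cwnd_cnt;	/* ACK counter between cwnd decrements */
	unsigned int packets_in_flight;
	int is_reno;			/* NewReno (no SACK) connection */
};

static unsigned int cwnd_min(const struct mini_tp *tp)
{
	(void)tp;			/* fixed floor here; a stand-in only */
	return 2;
}

/* Halve the window gradually: decrement cwnd on every second qualifying ACK. */
static void cwnd_down(struct mini_tp *tp, int flag)
{
	unsigned int decr = tp->snd_cwnd_cnt + 1;

	if ((flag & FLAG_FORWARD_PROGRESS) ||
	    (tp->is_reno && !(flag & FLAG_NOT_DUP))) {
		tp->snd_cwnd_cnt = decr & 1;
		decr >>= 1;

		if (decr && tp->snd_cwnd > cwnd_min(tp))
			tp->snd_cwnd -= decr;

		if (tp->snd_cwnd > tp->packets_in_flight + 1)
			tp->snd_cwnd = tp->packets_in_flight + 1;
	}
	/* ACKs that only cover the peer's data (no forward progress and,
	 * for Reno, not a duplicate ACK) now fall through untouched. */
}

int main(void)
{
	struct mini_tp tp = { .snd_cwnd = 20, .snd_cwnd_cnt = 0,
			      .packets_in_flight = 30, .is_reno = 0 };
	int i;

	/* ACKs generated by the reverse-direction data stream: cwnd stays put. */
	for (i = 0; i < 4; i++)
		cwnd_down(&tp, FLAG_NOT_DUP);
	printf("after data-only ACKs:        cwnd=%u\n", tp.snd_cwnd);	/* 20 */

	/* ACKs that actually advance snd_una: one decrement per two ACKs. */
	for (i = 0; i < 4; i++)
		cwnd_down(&tp, FLAG_FORWARD_PROGRESS | FLAG_NOT_DUP);
	printf("after forward-progress ACKs: cwnd=%u\n", tp.snd_cwnd);	/* 18 */

	return 0;
}

Before the patch, the body of cwnd_down() ran unconditionally, so the first loop above would already have pulled cwnd down even though packets_in_flight never changed; that is the behaviour the commit message describes for bidirectional flows.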
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp_input.c	21
1 file changed, 12 insertions, 9 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4e5884ac8f2..aea4b55bac2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1851,19 +1851,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk)
}

/* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk)
+static void tcp_cwnd_down(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int decr = tp->snd_cwnd_cnt + 1;

-	tp->snd_cwnd_cnt = decr&1;
-	decr >>= 1;
+	if ((flag&FLAG_FORWARD_PROGRESS) ||
+	    (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
+		tp->snd_cwnd_cnt = decr&1;
+		decr >>= 1;

-	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
-		tp->snd_cwnd -= decr;
+		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
+			tp->snd_cwnd -= decr;

-	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	}
}
/* Nothing was retransmitted or returned timestamp is less
@@ -2060,7 +2063,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
		}
		tcp_moderate_cwnd(tp);
	} else {
-		tcp_cwnd_down(sk);
+		tcp_cwnd_down(sk, flag);
	}
}
@@ -2260,7 +2263,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
	if (is_dupack || tcp_head_timedout(sk))
		tcp_update_scoreboard(sk);
-	tcp_cwnd_down(sk);
+	tcp_cwnd_down(sk, flag);
	tcp_xmit_retransmit_queue(sk);
}