pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[TCP]: Reduce sacked_out with reno when purging write_queue
author    Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
          Thu, 9 Aug 2007 11:53:36 +0000 (14:53 +0300)
committer David S. Miller <davem@sunset.davemloft.net>
          Wed, 10 Oct 2007 23:47:58 +0000 (16:47 -0700)
Previously TCP had a transitional state during which reno
counted segments that were already below the current window into
sacked_out; this is now prevented. In addition, the unconditional
S+L skb catching is now retried.
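
As a rough illustration of the accounting involved, here is a
standalone user-space sketch, not kernel code: the struct, the
reno_purge() helper and its exact clamping are simplifications
invented for this example; only left_out() mirrors the real
tcp_left_out() helper.

#include <assert.h>
#include <stdio.h>

/* Minimal model of the three counters involved. */
struct tcp_counters {
	unsigned int packets_out;	/* segments still in the write queue */
	unsigned int sacked_out;	/* SACKed, or faked by reno dupacks */
	unsigned int lost_out;		/* segments marked lost */
};

/* Mirrors tcp_left_out(): segments considered to have left the network. */
static unsigned int left_out(const struct tcp_counters *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* Hypothetical reno-mode purge: when a cumulative ACK removes segments
 * from the write queue, the faked sacked_out must shrink with them,
 * otherwise left_out() can transiently exceed packets_out, which is
 * the condition the now-unconditional tcp_verify_left_out() catches.
 */
static void reno_purge(struct tcp_counters *tp, unsigned int pkts_acked)
{
	tp->packets_out -= pkts_acked;
	if (tp->sacked_out > pkts_acked)
		tp->sacked_out -= pkts_acked;
	else
		tp->sacked_out = 0;
	assert(left_out(tp) <= tp->packets_out);
}

int main(void)
{
	struct tcp_counters tp = { .packets_out = 10, .sacked_out = 3, .lost_out = 1 };

	reno_purge(&tp, 5);	/* cumulative ACK covers 5 below-window segments */
	printf("packets_out=%u sacked_out=%u left_out=%u\n",
	       tp.packets_out, tp.sacked_out, left_out(&tp));
	return 0;
}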

This approach conservatively calls just remove_sack and leaves
the reset_sack() calls alone. The best solution to the whole
problem would be to calculate the new sacked_out fully first
(this patch does not move the reno_sack_reset calls from their
original sites and thus does not implement that). However, that
would require a very invasive change to fastretrans_alert
(perhaps even splitting it into two halves). Alternatively, all
callers of tcp_packets_in_flight (i.e., users that depend on
sacked_out) could be postponed until the new sacked_out has been
calculated, but that is no simpler an alternative.
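
For context, the dependency referred to above looks roughly like
this in include/net/tcp.h of this era (quoted from memory, so
treat the exact form as approximate):

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

Any code running after the acked skbs have been purged from the
write queue but before reno's sacked_out has been reduced would
see an inflated tcp_left_out() and hence an underestimated
in-flight count, which is why this patch calls
tcp_remove_reno_sacks() directly from tcp_clean_rtx_queue().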

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h
net/ipv4/tcp_input.c

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7042c32085f5ec63679d5980121fcdd5f8f3cda1..064c92fe00d2968bba50e43799acdbf4896e5acb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -759,8 +759,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 }
 
 /* Use define here intentionally to get BUG_ON location shown at the caller */
-#define tcp_verify_left_out(tp) \
-       BUG_ON(tp->rx_opt.sack_ok && (tcp_left_out(tp) > tp->packets_out))
+#define tcp_verify_left_out(tp)        BUG_ON(tcp_left_out(tp) > tp->packets_out)
 
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bf4fc3516fb99d92abec60ac76cf54c894ec93ef..f8af018dd22434e6287c3b0c9c456899ea69ea90 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2187,7 +2187,7 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
  * tcp_xmit_retransmit_queue().
  */
 static void
-tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
+tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2273,12 +2273,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
                if (!(flag & FLAG_SND_UNA_ADVANCED)) {
                        if (IsReno(tp) && is_dupack)
                                tcp_add_reno_sack(sk);
-               } else {
-                       int acked = prior_packets - tp->packets_out;
-                       if (IsReno(tp))
-                               tcp_remove_reno_sacks(sk, acked);
-                       do_lost = tcp_try_undo_partial(sk, acked);
-               }
+               } else
+                       do_lost = tcp_try_undo_partial(sk, pkts_acked);
                break;
        case TCP_CA_Loss:
                if (flag&FLAG_DATA_ACKED)
@@ -2577,6 +2573,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                tcp_ack_update_rtt(sk, acked, seq_rtt);
                tcp_ack_packets_out(sk);
 
+               if (IsReno(tp))
+                       tcp_remove_reno_sacks(sk, pkts_acked);
+
                if (ca_ops->pkts_acked) {
                        s32 rtt_us = -1;
 
@@ -2927,7 +2926,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
                    tcp_may_raise_cwnd(sk, flag))
                        tcp_cong_avoid(sk, ack, prior_in_flight, 0);
-               tcp_fastretrans_alert(sk, prior_packets, flag);
+               tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag);
        } else {
                if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
                        tcp_cong_avoid(sk, ack, prior_in_flight, 1);