[TCP]: Extract DSACK detection code from tcp_sacktag_write_queue().
author David S. Miller <davem@davemloft.net>
Tue, 19 Jun 2007 05:43:06 +0000 (22:43 -0700)
committer David S. Miller <davem@sunset.davemloft.net>
Wed, 10 Oct 2007 23:47:51 +0000 (16:47 -0700)
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/tcp_input.c
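
The new tcp_check_dsack() helper pulls the RFC 2883 D-SACK test out of tcp_sacktag_write_queue(): the first SACK block is treated as a duplicate SACK if it starts below the cumulative ACK, or (when at least two blocks are present) if it is fully contained in the second block. Below is a minimal standalone sketch of that test for illustration only; the names is_dsack and struct sack_block are hypothetical and the fields are in host byte order, whereas the kernel code operates on struct tcp_sack_block_wire via ntohl()/get_unaligned() and additionally updates tp->rx_opt.sack_ok, the SNMP counters and tp->undo_retrans.

/* Standalone sketch of the D-SACK check performed by tcp_check_dsack().
 * Sequence numbers are compared with wrap-safe 32-bit arithmetic, mirroring
 * the kernel's before()/after() helpers. Not the actual kernel code.
 */
#include <stdint.h>

typedef uint32_t u32;

static int before(u32 seq1, u32 seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static int after(u32 seq1, u32 seq2)
{
	return before(seq2, seq1);
}

/* Hypothetical host-order SACK block, unlike tcp_sack_block_wire. */
struct sack_block {
	u32 start_seq;
	u32 end_seq;
};

/* Returns 1 if the first SACK block is a duplicate SACK (RFC 2883):
 * either it reports data below the cumulative ACK, or it is fully
 * contained within the second block.
 */
static int is_dsack(const struct sack_block *sp, int num_sacks, u32 ack_seq)
{
	if (before(sp[0].start_seq, ack_seq))
		return 1;

	if (num_sacks > 1 &&
	    !after(sp[0].end_seq, sp[1].end_seq) &&
	    !before(sp[0].start_seq, sp[1].start_seq))
		return 1;

	return 0;
}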

index 0aa17243d3694e655ffdbf7e5e0598c2f2b271a4..5187870d03337a26fcf40edd002570f9a8a581c7 100644 (file)
@@ -960,6 +960,39 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
  * Both of these heuristics are not used in Loss state, when we cannot
  * account for retransmits accurately.
  */
+static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
+                          struct tcp_sack_block_wire *sp, int num_sacks,
+                          u32 prior_snd_una)
+{
+       u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq));
+       u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq));
+       int dup_sack = 0;
+
+       if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
+               dup_sack = 1;
+               tp->rx_opt.sack_ok |= 4;
+               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+       } else if (num_sacks > 1) {
+               u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
+               u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq));
+
+               if (!after(end_seq_0, end_seq_1) &&
+                   !before(start_seq_0, start_seq_1)) {
+                       dup_sack = 1;
+                       tp->rx_opt.sack_ok |= 4;
+                       NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+               }
+       }
+
+       /* D-SACK for already forgotten data... Do dumb counting. */
+       if (dup_sack &&
+           !after(end_seq_0, prior_snd_una) &&
+           after(end_seq_0, tp->undo_marker))
+               tp->undo_retrans--;
+
+       return dup_sack;
+}
+
 static int
 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 {
@@ -985,27 +1018,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        }
        prior_fackets = tp->fackets_out;
 
-       /* Check for D-SACK. */
-       if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
-               flag |= FLAG_DSACKING_ACK;
-               found_dup_sack = 1;
-               tp->rx_opt.sack_ok |= 4;
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
-       } else if (num_sacks > 1 &&
-                       !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
-                       !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+       found_dup_sack = tcp_check_dsack(tp, ack_skb, sp,
+                                        num_sacks, prior_snd_una);
+       if (found_dup_sack)
                flag |= FLAG_DSACKING_ACK;
-               found_dup_sack = 1;
-               tp->rx_opt.sack_ok |= 4;
-               NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
-       }
-
-       /* D-SACK for already forgotten data...
-        * Do dumb counting. */
-       if (found_dup_sack &&
-                       !after(ntohl(sp[0].end_seq), prior_snd_una) &&
-                       after(ntohl(sp[0].end_seq), tp->undo_marker))
-               tp->undo_retrans--;
 
        /* Eliminate too old ACKs, but take into
         * account more or less fresh ones, they can