[SKB]: __skb_append = __skb_queue_after
author Gerrit Renker <gerrit@erg.abdn.ac.uk>
Mon, 14 Apr 2008 07:05:09 +0000 (00:05 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 14 Apr 2008 07:05:09 +0000 (00:05 -0700)
This expresses __skb_append in terms of __skb_queue_after, exploiting that

  __skb_append(old, new, list) = __skb_queue_after(list, old, new).
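
The identity can be read directly off the two inline definitions in
include/linux/skbuff.h; the sketch below simply restates them side by side
(kernel-doc and surrounding declarations omitted, so it is not compilable on
its own). With prev == old, both helpers expand to the same __skb_insert()
call, which is what this patch relies on:

  static inline void __skb_queue_after(struct sk_buff_head *list,
                                       struct sk_buff *prev,
                                       struct sk_buff *newsk)
  {
          __skb_insert(newsk, prev, prev->next, list);
  }

  static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk,
                                  struct sk_buff_head *list)
  {
          /* same insertion with prev = old, next = old->next */
          __skb_insert(newsk, old, old->next, list);
  }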

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/skbuff.h
include/net/tcp.h
net/core/skbuff.c
net/ipv4/tcp_input.c

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bb107ab675fc29f1f325da5226f49a2e6ca58871..83c851846829f22f980244dca0c02f74bb5f47e2 100644
@@ -697,6 +697,9 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
        __skb_insert(newsk, prev, prev->next, list);
 }
 
+extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+                      struct sk_buff_head *list);
+
 /**
  *     __skb_queue_head - queue a buffer at the list head
  *     @list: list to use
@@ -738,15 +741,6 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
        next->prev  = prev->next = newsk;
 }
 
-/*
- *     Place a packet after a given packet in a list.
- */
-extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
-       __skb_insert(newsk, old, old->next, list);
-}
-
 /*
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 58d82822414d04263b3c697612c1dfcaf12af88d..2ab350eca02ee87fe49be39e90dec29a91d428fe 100644 (file)
@@ -1247,7 +1247,7 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
                                                struct sk_buff *buff,
                                                struct sock *sk)
 {
-       __skb_append(skb, buff, &sk->sk_write_queue);
+       __skb_queue_after(&sk->sk_write_queue, skb, buff);
 }
 
 /* Insert skb between prev and next on the write queue of sk.  */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e4259215607f6772ce6e86ab7605030cc79d603f..4cd12d99b12e3930fb3a6865444af412a49445d9 100644 (file)
@@ -1860,7 +1860,7 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
        unsigned long flags;
 
        spin_lock_irqsave(&list->lock, flags);
-       __skb_append(old, newsk, list);
+       __skb_queue_after(list, old, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6e46b4c0f28cd4e3a22e5267e4fd2076240e4820..74361195604586212170266b461e83e3e7923610 100644 (file)
@@ -3968,7 +3968,7 @@ drop:
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                if (seq == TCP_SKB_CB(skb1)->end_seq) {
-                       __skb_append(skb1, skb, &tp->out_of_order_queue);
+                       __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
 
                        if (!tp->rx_opt.num_sacks ||
                            tp->selective_acks[0].end_seq != seq)