[NET]: Kill skb->list
author    David S. Miller <davem@davemloft.net>
          Wed, 10 Aug 2005 02:25:21 +0000 (19:25 -0700)
committer David S. Miller <davem@sunset.davemloft.net>
          Mon, 29 Aug 2005 22:31:14 +0000 (15:31 -0700)
Remove the "list" member of struct sk_buff, as it is entirely
redundant.  All SKB list removal callers know which list the
SKB is on, so storing this in sk_buff does nothing other than
take up some space.

Two tricky bits were SCTP, which I took care of, and two ATM
drivers which Francois Romieu <romieu@fr.zoreil.com> fixed
up.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
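
The shape of the change, as a before/after sketch in C: the new skb_unlink()/skb_append() signatures below are the ones this patch adds to include/linux/skbuff.h, while the queue name is a made-up example.

	/* Before: each queued skb carried a back-pointer to its list,
	 * so callers could remove it "anonymously".
	 */
	skb_unlink(skb);		/* locked skb->list->lock internally */
	skb_append(old, newsk);		/* spliced after 'old' on old->list  */

	/* After: skb->list is gone, so every call site must name the
	 * queue it is operating on ('pending_q' is hypothetical).
	 */
	skb_unlink(skb, &pending_q);
	skb_append(old, newsk, &pending_q);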
29 files changed:
drivers/atm/nicstar.c
drivers/atm/nicstar.h
drivers/atm/zatm.c
drivers/bluetooth/bfusb.c
drivers/ieee1394/ieee1394_core.c
drivers/isdn/act2000/capi.c
drivers/net/shaper.c
drivers/net/wan/sdla_fr.c
drivers/usb/net/usbnet.c
include/linux/skbuff.h
net/atm/ipcommon.c
net/ax25/ax25_subr.c
net/core/skbuff.c
net/decnet/af_decnet.c
net/decnet/dn_nsp_out.c
net/econet/af_econet.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/irda/irlap_frame.c
net/lapb/lapb_subr.c
net/llc/af_llc.c
net/llc/llc_conn.c
net/netrom/nr_subr.c
net/rose/rose_subr.c
net/sctp/socket.c
net/sctp/ulpqueue.c
net/unix/garbage.c
net/x25/x25_subr.c

diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index b2a7b754fd1403a4c7dbd4ffe5ab47cd3de70b5e..a0e3bd861f1c1c9315d554a125e3d84dde9698af 100644
@@ -214,8 +214,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
 static void __devinit ns_init_card_error(ns_dev *card, int error);
 static scq_info *get_scq(int size, u32 scd);
 static void free_scq(scq_info *scq, struct atm_vcc *vcc);
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-                       u32 handle2, u32 addr2);
+static void push_rxbufs(ns_dev *, struct sk_buff *);
 static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
 static int ns_open(struct atm_vcc *vcc);
 static void ns_close(struct atm_vcc *vcc);
@@ -766,6 +765,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
         return error;
       }
+      NS_SKB_CB(hb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->hbpool.queue, hb);
       card->hbpool.count++;
    }
@@ -786,9 +786,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
         return error;
       }
+      NS_SKB_CB(lb)->buf_type = BUF_LG;
       skb_queue_tail(&card->lbpool.queue, lb);
       skb_reserve(lb, NS_SMBUFSIZE);
-      push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+      push_rxbufs(card, lb);
       /* Due to the implementation of push_rxbufs() this is 1, not 0 */
       if (j == 1)
       {
@@ -822,9 +823,10 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
         return error;
       }
+      NS_SKB_CB(sb)->buf_type = BUF_SM;
       skb_queue_tail(&card->sbpool.queue, sb);
       skb_reserve(sb, NS_AAL0_HEADER);
-      push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+      push_rxbufs(card, sb);
    }
    /* Test for strange behaviour which leads to crashes */
    if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
@@ -852,6 +854,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
          ns_init_card_error(card, error);
         return error;
       }
+      NS_SKB_CB(iovb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->iovpool.queue, iovb);
       card->iovpool.count++;
    }
@@ -1078,12 +1081,18 @@ static void free_scq(scq_info *scq, struct atm_vcc *vcc)
 
 /* The handles passed must be pointers to the sk_buff containing the small
    or large buffer(s) cast to u32. */
-static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
-                       u32 handle2, u32 addr2)
+static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
 {
+   struct ns_skb_cb *cb = NS_SKB_CB(skb);
+   u32 handle1, addr1;
+   u32 handle2, addr2;
    u32 stat;
    unsigned long flags;
    
+   /* *BARF* */
+   handle2 = addr2 = 0;
+   handle1 = (u32)skb;
+   addr1 = (u32)virt_to_bus(skb->data);
 
 #ifdef GENERAL_DEBUG
    if (!addr1)
@@ -1093,7 +1102,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
    stat = readl(card->membase + STAT);
    card->sbfqc = ns_stat_sfbqc_get(stat);
    card->lbfqc = ns_stat_lfbqc_get(stat);
-   if (type == BUF_SM)
+   if (cb->buf_type == BUF_SM)
    {
       if (!addr2)
       {
@@ -1111,7 +1120,7 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
         }
       }      
    }
-   else /* type == BUF_LG */
+   else /* buf_type == BUF_LG */
    {
       if (!addr2)
       {
@@ -1132,26 +1141,26 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
 
    if (addr2)
    {
-      if (type == BUF_SM)
+      if (cb->buf_type == BUF_SM)
       {
          if (card->sbfqc >= card->sbnr.max)
          {
-            skb_unlink((struct sk_buff *) handle1);
+            skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2);
+            skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle2);
             return;
          }
         else
             card->sbfqc += 2;
       }
-      else /* (type == BUF_LG) */
+      else /* (buf_type == BUF_LG) */
       {
          if (card->lbfqc >= card->lbnr.max)
          {
-            skb_unlink((struct sk_buff *) handle1);
+            skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle1);
-            skb_unlink((struct sk_buff *) handle2);
+            skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
             dev_kfree_skb_any((struct sk_buff *) handle2);
             return;
          }
@@ -1166,12 +1175,12 @@ static void push_rxbufs(ns_dev *card, u32 type, u32 handle1, u32 addr1,
       writel(handle2, card->membase + DR2);
       writel(addr1, card->membase + DR1);
       writel(handle1, card->membase + DR0);
-      writel(NS_CMD_WRITE_FREEBUFQ | (u32) type, card->membase + CMD);
+      writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
  
       spin_unlock_irqrestore(&card->res_lock, flags);
 
       XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
-              (type == BUF_SM ? "small" : "large"), addr1, addr2);
+              (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
    }
 
    if (!card->efbie && card->sbfqc >= card->sbnr.min &&
@@ -1322,9 +1331,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
             card->efbie = 0;
             break;
          }
+         NS_SKB_CB(sb)->buf_type = BUF_SM;
          skb_queue_tail(&card->sbpool.queue, sb);
          skb_reserve(sb, NS_AAL0_HEADER);
-         push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+         push_rxbufs(card, sb);
       }
       card->sbfqc = i;
       process_rsq(card);
@@ -1348,9 +1358,10 @@ static irqreturn_t ns_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
             card->efbie = 0;
             break;
          }
+         NS_SKB_CB(lb)->buf_type = BUF_LG;
          skb_queue_tail(&card->lbpool.queue, lb);
          skb_reserve(lb, NS_SMBUFSIZE);
-         push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+         push_rxbufs(card, lb);
       }
       card->lbfqc = i;
       process_rsq(card);
@@ -2227,6 +2238,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
             recycle_rx_buf(card, skb);
             return;
         }
+         NS_SKB_CB(iovb)->buf_type = BUF_NONE;
       }
       else
          if (--card->iovpool.count < card->iovnr.min)
@@ -2234,6 +2246,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
            struct sk_buff *new_iovb;
            if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
            {
+               NS_SKB_CB(new_iovb)->buf_type = BUF_NONE;
                skb_queue_tail(&card->iovpool.queue, new_iovb);
                card->iovpool.count++;
            }
@@ -2264,7 +2277,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
 
    if (NS_SKB(iovb)->iovcnt == 1)
    {
-      if (skb->list != &card->sbpool.queue)
+      if (NS_SKB_CB(skb)->buf_type != BUF_SM)
       {
          printk("nicstar%d: Expected a small buffer, and this is not one.\n",
                card->index);
@@ -2278,7 +2291,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
    }
    else /* NS_SKB(iovb)->iovcnt >= 2 */
    {
-      if (skb->list != &card->lbpool.queue)
+      if (NS_SKB_CB(skb)->buf_type != BUF_LG)
       {
          printk("nicstar%d: Expected a large buffer, and this is not one.\n",
                card->index);
@@ -2322,8 +2335,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
          /* skb points to a small buffer */
          if (!atm_charge(vcc, skb->truesize))
          {
-            push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-                        0, 0);
+            push_rxbufs(card, skb);
             atomic_inc(&vcc->stats->rx_drop);
          }
          else
@@ -2350,8 +2362,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
         {
             if (!atm_charge(vcc, sb->truesize))
             {
-               push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                           0, 0);
+               push_rxbufs(card, sb);
                atomic_inc(&vcc->stats->rx_drop);
             }
             else
@@ -2367,16 +2378,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
                atomic_inc(&vcc->stats->rx);
             }
 
-            push_rxbufs(card, BUF_LG, (u32) skb,
-                          (u32) virt_to_bus(skb->data), 0, 0);
+            push_rxbufs(card, skb);
 
         }
         else                   /* len > NS_SMBUFSIZE, the usual case */
         {
             if (!atm_charge(vcc, skb->truesize))
             {
-               push_rxbufs(card, BUF_LG, (u32) skb,
-                           (u32) virt_to_bus(skb->data), 0, 0);
+               push_rxbufs(card, skb);
                atomic_inc(&vcc->stats->rx_drop);
             }
             else
@@ -2394,8 +2403,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
                atomic_inc(&vcc->stats->rx);
             }
 
-            push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                        0, 0);
+            push_rxbufs(card, sb);
 
          }
         
@@ -2430,6 +2438,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
                   card->hbpool.count++;
                }
             }
+            NS_SKB_CB(hb)->buf_type = BUF_NONE;
         }
         else
          if (--card->hbpool.count < card->hbnr.min)
@@ -2437,6 +2446,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
             struct sk_buff *new_hb;
             if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
             {
+               NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
                skb_queue_tail(&card->hbpool.queue, new_hb);
                card->hbpool.count++;
             }
@@ -2444,6 +2454,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
            {
                if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
                {
+                  NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
                   skb_queue_tail(&card->hbpool.queue, new_hb);
                   card->hbpool.count++;
                }
@@ -2473,8 +2484,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
             remaining = len - iov->iov_len;
             iov++;
             /* Free the small buffer */
-            push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data),
-                        0, 0);
+            push_rxbufs(card, sb);
 
             /* Copy all large buffers to the huge buffer and free them */
             for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
@@ -2485,8 +2495,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
                skb_put(hb, tocopy);
                iov++;
                remaining -= tocopy;
-               push_rxbufs(card, BUF_LG, (u32) lb,
-                           (u32) virt_to_bus(lb->data), 0, 0);
+               push_rxbufs(card, lb);
             }
 #ifdef EXTRA_DEBUG
             if (remaining != 0 || hb->len != len)
@@ -2527,9 +2536,10 @@ static void ns_sb_destructor(struct sk_buff *sb)
       sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
       if (sb == NULL)
          break;
+      NS_SKB_CB(sb)->buf_type = BUF_SM;
       skb_queue_tail(&card->sbpool.queue, sb);
       skb_reserve(sb, NS_AAL0_HEADER);
-      push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+      push_rxbufs(card, sb);
    } while (card->sbfqc < card->sbnr.min);
 }
 
@@ -2550,9 +2560,10 @@ static void ns_lb_destructor(struct sk_buff *lb)
       lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
       if (lb == NULL)
          break;
+      NS_SKB_CB(lb)->buf_type = BUF_LG;
       skb_queue_tail(&card->lbpool.queue, lb);
       skb_reserve(lb, NS_SMBUFSIZE);
-      push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+      push_rxbufs(card, lb);
    } while (card->lbfqc < card->lbnr.min);
 }
 
@@ -2569,6 +2580,7 @@ static void ns_hb_destructor(struct sk_buff *hb)
       hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
       if (hb == NULL)
          break;
+      NS_SKB_CB(hb)->buf_type = BUF_NONE;
       skb_queue_tail(&card->hbpool.queue, hb);
       card->hbpool.count++;
    }
@@ -2577,45 +2589,25 @@ static void ns_hb_destructor(struct sk_buff *hb)
 #endif /* NS_USE_DESTRUCTORS */
 
 
-
 static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
 {
-   if (skb->list == &card->sbpool.queue)
-      push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-   else if (skb->list == &card->lbpool.queue)
-      push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data), 0, 0);
-   else
-   {
-      printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-      dev_kfree_skb_any(skb);
-   }
-}
+       struct ns_skb_cb *cb = NS_SKB_CB(skb);
 
+       if (unlikely(cb->buf_type == BUF_NONE)) {
+               printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
+               dev_kfree_skb_any(skb);
+       } else
+               push_rxbufs(card, skb);
+}
 
 
 static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
 {
-   struct sk_buff *skb;
-
-   for (; count > 0; count--)
-   {
-      skb = (struct sk_buff *) (iov++)->iov_base;
-      if (skb->list == &card->sbpool.queue)
-         push_rxbufs(card, BUF_SM, (u32) skb, (u32) virt_to_bus(skb->data),
-                    0, 0);
-      else if (skb->list == &card->lbpool.queue)
-         push_rxbufs(card, BUF_LG, (u32) skb, (u32) virt_to_bus(skb->data),
-                    0, 0);
-      else
-      {
-         printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
-         dev_kfree_skb_any(skb);
-      }
-   }
+       while (count-- > 0)
+               recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
 }
 
 
-
 static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 {
    if (card->iovpool.count < card->iovnr.max)
@@ -2631,7 +2623,7 @@ static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
 
 static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 {
-   skb_unlink(sb);
+   skb_unlink(sb, &card->sbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
    if (card->sbfqc < card->sbnr.min)
 #else
@@ -2640,10 +2632,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
       struct sk_buff *new_sb;
       if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
          skb_queue_tail(&card->sbpool.queue, new_sb);
          skb_reserve(new_sb, NS_AAL0_HEADER);
-         push_rxbufs(card, BUF_SM, (u32) new_sb,
-                     (u32) virt_to_bus(new_sb->data), 0, 0);
+         push_rxbufs(card, new_sb);
       }
    }
    if (card->sbfqc < card->sbnr.init)
@@ -2652,10 +2644,10 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
       struct sk_buff *new_sb;
       if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
          skb_queue_tail(&card->sbpool.queue, new_sb);
          skb_reserve(new_sb, NS_AAL0_HEADER);
-         push_rxbufs(card, BUF_SM, (u32) new_sb,
-                     (u32) virt_to_bus(new_sb->data), 0, 0);
+         push_rxbufs(card, new_sb);
       }
    }
 }
@@ -2664,7 +2656,7 @@ static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
 
 static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
 {
-   skb_unlink(lb);
+   skb_unlink(lb, &card->lbpool.queue);
 #ifdef NS_USE_DESTRUCTORS
    if (card->lbfqc < card->lbnr.min)
 #else
@@ -2673,10 +2665,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
       struct sk_buff *new_lb;
       if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
          skb_queue_tail(&card->lbpool.queue, new_lb);
          skb_reserve(new_lb, NS_SMBUFSIZE);
-         push_rxbufs(card, BUF_LG, (u32) new_lb,
-                     (u32) virt_to_bus(new_lb->data), 0, 0);
+         push_rxbufs(card, new_lb);
       }
    }
    if (card->lbfqc < card->lbnr.init)
@@ -2685,10 +2677,10 @@ static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
       struct sk_buff *new_lb;
       if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
       {
+         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
          skb_queue_tail(&card->lbpool.queue, new_lb);
          skb_reserve(new_lb, NS_SMBUFSIZE);
-         push_rxbufs(card, BUF_LG, (u32) new_lb,
-                     (u32) virt_to_bus(new_lb->data), 0, 0);
+         push_rxbufs(card, new_lb);
       }
    }
 }
@@ -2880,9 +2872,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
                   sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
                   if (sb == NULL)
                      return -ENOMEM;
+                  NS_SKB_CB(sb)->buf_type = BUF_SM;
                   skb_queue_tail(&card->sbpool.queue, sb);
                   skb_reserve(sb, NS_AAL0_HEADER);
-                  push_rxbufs(card, BUF_SM, (u32) sb, (u32) virt_to_bus(sb->data), 0, 0);
+                  push_rxbufs(card, sb);
               }
               break;
 
@@ -2894,9 +2887,10 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
                   lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
                   if (lb == NULL)
                      return -ENOMEM;
+                  NS_SKB_CB(lb)->buf_type = BUF_LG;
                   skb_queue_tail(&card->lbpool.queue, lb);
                   skb_reserve(lb, NS_SMBUFSIZE);
-                  push_rxbufs(card, BUF_LG, (u32) lb, (u32) virt_to_bus(lb->data), 0, 0);
+                  push_rxbufs(card, lb);
               }
               break;
 
@@ -2923,6 +2917,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
                   hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
                   if (hb == NULL)
                      return -ENOMEM;
+                  NS_SKB_CB(hb)->buf_type = BUF_NONE;
                   ns_grab_int_lock(card, flags);
                   skb_queue_tail(&card->hbpool.queue, hb);
                   card->hbpool.count++;
@@ -2953,6 +2948,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
                   iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
                   if (iovb == NULL)
                      return -ENOMEM;
+                  NS_SKB_CB(iovb)->buf_type = BUF_NONE;
                   ns_grab_int_lock(card, flags);
                   skb_queue_tail(&card->iovpool.queue, iovb);
                   card->iovpool.count++;
@@ -2979,17 +2975,12 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
 }
 
 
-
 static void which_list(ns_dev *card, struct sk_buff *skb)
 {
-   printk("It's a %s buffer.\n", skb->list == &card->sbpool.queue ?
-          "small" : skb->list == &card->lbpool.queue ? "large" :
-         skb->list == &card->hbpool.queue ? "huge" :
-         skb->list == &card->iovpool.queue ? "iovec" : "unknown");
+       printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
 }
 
 
-
 static void ns_poll(unsigned long arg)
 {
    int i;
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index ea83c46c8ba58b1c63eadceb973d52e45ed816f4..5997bcb45b59492486bafb4d951b6171a6a01049 100644
 
 #define NS_IOREMAP_SIZE 4096
 
-#define BUF_SM 0x00000000      /* These two are used for push_rxbufs() */
-#define BUF_LG 0x00000001       /* CMD, Write_FreeBufQ, LBUF bit */
+/*
+ * BUF_XX distinguish the Rx buffers depending on their (small/large) size.
+ * BUF_SM and BUF_LG are both used by the driver and the device.
+ * BUF_NONE is only used by the driver.
+ */
+#define BUF_SM         0x00000000      /* These two are used for push_rxbufs() */
+#define BUF_LG         0x00000001      /* CMD, Write_FreeBufQ, LBUF bit */
+#define BUF_NONE       0xffffffff      /* Software only: */
 
 #define NS_HBUFSIZE 65568      /* Size of max. AAL5 PDU */
 #define NS_MAX_IOVECS (2 + (65568 - NS_SMBUFSIZE) / \
@@ -684,6 +690,12 @@ enum ns_regs
 /* Device driver structures ***************************************************/
 
 
+struct ns_skb_cb {
+       u32 buf_type;                   /* BUF_SM/BUF_LG/BUF_NONE */
+};
+
+#define NS_SKB_CB(skb) ((struct ns_skb_cb *)((skb)->cb))
+
 typedef struct tsq_info
 {
    void *org;
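
NS_SKB_CB, defined above, is the usual skb->cb idiom: the control block is scratch space owned by whoever currently holds the skb, so the driver can tag each Rx buffer with its pool instead of comparing skb->list against the pool queues. A minimal sketch of the idiom; the helper name and the BUILD_BUG_ON are illustrative assumptions, not part of this patch:

	struct ns_skb_cb {
		u32 buf_type;		/* BUF_SM / BUF_LG / BUF_NONE */
	};

	#define NS_SKB_CB(skb)	((struct ns_skb_cb *)((skb)->cb))

	/* Tag a buffer when it enters a pool; the tag travels with the skb. */
	static void tag_small_buffer(struct sk_buff *sb)
	{
		/* skb->cb is a small fixed-size array, so the state must fit. */
		BUILD_BUG_ON(sizeof(struct ns_skb_cb) > sizeof(sb->cb));
		NS_SKB_CB(sb)->buf_type = BUF_SM;
	}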
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a2b236a966e0599285698160e44a56161f94be5a..85fee9530fa98c87db373260f8e97ffd8156d81f 100644
@@ -417,10 +417,12 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
                chan = (here[3] & uPD98401_AAL5_CHAN) >>
                    uPD98401_AAL5_CHAN_SHIFT;
                if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
+                       int pos = ZATM_VCC(vcc)->pool;
+
                        vcc = zatm_dev->rx_map[chan];
-                       if (skb == zatm_dev->last_free[ZATM_VCC(vcc)->pool])
-                               zatm_dev->last_free[ZATM_VCC(vcc)->pool] = NULL;
-                       skb_unlink(skb);
+                       if (skb == zatm_dev->last_free[pos])
+                               zatm_dev->last_free[pos] = NULL;
+                       skb_unlink(skb, zatm_dev->pool + pos);
                }
                else {
                        printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index c42d7e6ac1c5858c851096f38ff6afe65185ecc4..e8d2a340356deec78f56917f330d92f5ab96c3ec 100644
@@ -158,7 +158,7 @@ static int bfusb_send_bulk(struct bfusb *bfusb, struct sk_buff *skb)
        if (err) {
                BT_ERR("%s bulk tx submit failed urb %p err %d", 
                                        bfusb->hdev->name, urb, err);
-               skb_unlink(skb);
+               skb_unlink(skb, &bfusb->pending_q);
                usb_free_urb(urb);
        } else
                atomic_inc(&bfusb->pending_tx);
@@ -212,7 +212,7 @@ static void bfusb_tx_complete(struct urb *urb, struct pt_regs *regs)
 
        read_lock(&bfusb->lock);
 
-       skb_unlink(skb);
+       skb_unlink(skb, &bfusb->pending_q);
        skb_queue_tail(&bfusb->completed_q, skb);
 
        bfusb_tx_wakeup(bfusb);
@@ -253,7 +253,7 @@ static int bfusb_rx_submit(struct bfusb *bfusb, struct urb *urb)
        if (err) {
                BT_ERR("%s bulk rx submit failed urb %p err %d",
                                        bfusb->hdev->name, urb, err);
-               skb_unlink(skb);
+               skb_unlink(skb, &bfusb->pending_q);
                kfree_skb(skb);
                usb_free_urb(urb);
        }
@@ -398,7 +398,7 @@ static void bfusb_rx_complete(struct urb *urb, struct pt_regs *regs)
                buf   += len;
        }
 
-       skb_unlink(skb);
+       skb_unlink(skb, &bfusb->pending_q);
        kfree_skb(skb);
 
        bfusb_rx_submit(bfusb, urb);
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index b248d89de8b4e26c543d5b30c2bd7a187d657059..d633770fac8ee3b99a3a77c84356db02be2bf7c6 100644
@@ -681,7 +681,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
                 return;
         }
 
-       __skb_unlink(skb, skb->list);
+       __skb_unlink(skb, &host->pending_packet_queue);
 
        if (packet->state == hpsb_queued) {
                packet->sendtime = jiffies;
@@ -989,7 +989,7 @@ void abort_timedouts(unsigned long __opaque)
                packet = (struct hpsb_packet *)skb->data;
 
                if (time_before(packet->sendtime + expire, jiffies)) {
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &host->pending_packet_queue);
                        packet->state = hpsb_complete;
                        packet->ack_code = ACKX_TIMEOUT;
                        queue_packet_complete(packet);
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c
index afa46681f9833c576e3afd7b54f9a74debdac2bc..6ae6eb32211141a93c299a65f15dd8bd14e20a79 100644
@@ -606,7 +606,7 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
                 if ((((m->msg.data_b3_req.fakencci >> 8) & 0xff) == chan->ncci) &&
                    (m->msg.data_b3_req.blocknr == blocknr)) {
                        /* found corresponding DATA_B3_REQ */
-                        skb_unlink(tmp);
+                        skb_unlink(tmp, &card->ackq);
                        chan->queued -= m->msg.data_b3_req.datalen;
                        if (m->msg.data_b3_req.flags)
                                ret = m->msg.data_b3_req.datalen;
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
index 3ad0b6751f6fb3c432ea883d8f753b7831a5c8e4..221354eea21f9a9257771e68074821ea7ea64c2f 100644
@@ -156,52 +156,6 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
         
        SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
        
-#ifdef SHAPER_COMPLEX /* and broken.. */
-
-       while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
-       {
-               if(ptr->pri<skb->pri 
-                       && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
-               {
-                       struct sk_buff *tmp=ptr->prev;
-
-                       /*
-                        *      It goes before us therefore we slip the length
-                        *      of the new frame.
-                        */
-
-                       SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
-                       SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
-
-                       /*
-                        *      The packet may have slipped so far back it
-                        *      fell off.
-                        */
-                       if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
-                       {
-                               skb_unlink(ptr);
-                               dev_kfree_skb(ptr);
-                       }
-                       ptr=tmp;
-               }
-               else
-                       break;
-       }
-       if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
-               skb_queue_head(&shaper->sendq,skb);
-       else
-       {
-               struct sk_buff *tmp;
-               /*
-                *      Set the packet clock out time according to the
-                *      frames ahead. Im sure a bit of thought could drop
-                *      this loop.
-                */
-               for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
-                       SHAPERCB(skb)->shapeclock+=tmp->shapelen;
-               skb_append(ptr,skb);
-       }
-#else
        {
                struct sk_buff *tmp;
                /*
@@ -220,7 +174,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
                } else
                        skb_queue_tail(&shaper->sendq, skb);
        }
-#endif         
+
        if(sh_debug)
                printk("Frame queued.\n");
        if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
@@ -302,7 +256,7 @@ static void shaper_kick(struct shaper *shaper)
                         *      Pull the frame and get interrupts back on.
                         */
                         
-                       skb_unlink(skb);
+                       skb_unlink(skb, &shaper->sendq);
                        if (shaper->recovery < 
                            SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
                                shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index c5f5e62aab8b8c31814fea2fa900799db0c061c3..0497dbdb8631fe379c7f952dc2c50ee7c21bd814 100644
@@ -445,7 +445,7 @@ void        s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
 void   s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
 
 unsigned short calc_checksum (char *, int);
-static int setup_fr_header(struct sk_buff** skb,
+static int setup_fr_header(struct sk_buff *skb,
                           struct net_device* dev, char op_mode);
 
 
@@ -1372,7 +1372,7 @@ static int if_send(struct sk_buff* skb, struct net_device* dev)
        /* Move the if_header() code to here. By inserting frame
         * relay header in if_header() we would break the
         * tcpdump and other packet sniffers */
-       chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby);
+       chan->fr_header_len = setup_fr_header(skb,dev,chan->common.usedby);
        if (chan->fr_header_len < 0 ){
                ++chan->ifstats.tx_dropped;
                ++card->wandev.stats.tx_dropped;
@@ -1597,8 +1597,6 @@ static int setup_for_delayed_transmit(struct net_device* dev,
                return 1;
        }
 
-       skb_unlink(skb);
-       
         chan->transmit_length = len;
        chan->delay_skb = skb;
         
@@ -4871,18 +4869,15 @@ static void unconfig_fr (sdla_t *card)
        }
 }
 
-static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
+static int setup_fr_header(struct sk_buff *skb, struct net_device* dev,
                           char op_mode)
 {
-       struct sk_buff *skb = *skb_orig;
        fr_channel_t *chan=dev->priv;
 
-       if (op_mode == WANPIPE){
-
+       if (op_mode == WANPIPE) {
                chan->fr_header[0]=Q922_UI;
                
                switch (htons(skb->protocol)){
-                       
                case ETH_P_IP:
                        chan->fr_header[1]=NLPID_IP;
                        break;
@@ -4894,16 +4889,14 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
        }
 
        /* If we are in bridging mode, we must apply
-        * an Ethernet header */
-       if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){
-
-
+        * an Ethernet header
+        */
+       if (op_mode == BRIDGE || op_mode == BRIDGE_NODE) {
                /* Encapsulate the packet as a bridged Ethernet frame. */
 #ifdef DEBUG
                printk(KERN_INFO "%s: encapsulating skb for frame relay\n", 
                        dev->name);
 #endif
-               
                chan->fr_header[0] = 0x03;
                chan->fr_header[1] = 0x00;
                chan->fr_header[2] = 0x80;
@@ -4916,7 +4909,6 @@ static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
                /* Yuck. */
                skb->protocol = ETH_P_802_3;
                return 8;
-
        }
                
        return 0;
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 4528a00c45b010f8ad6ef103e5eaae6ba8ae83a1..a2f67245f6da37adac5a5bb93e68aea202c3aafb 100644
@@ -2903,19 +2903,18 @@ static struct net_device_stats *usbnet_get_stats (struct net_device *net)
  * completion callbacks.  2.5 should have fixed those bugs...
  */
 
-static void defer_bh (struct usbnet *dev, struct sk_buff *skb)
+static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
 {
-       struct sk_buff_head     *list = skb->list;
        unsigned long           flags;
 
-       spin_lock_irqsave (&list->lock, flags);
-       __skb_unlink (skb, list);
-       spin_unlock (&list->lock);
-       spin_lock (&dev->done.lock);
-       __skb_queue_tail (&dev->done, skb);
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_unlink(skb, list);
+       spin_unlock(&list->lock);
+       spin_lock(&dev->done.lock);
+       __skb_queue_tail(&dev->done, skb);
        if (dev->done.qlen == 1)
-               tasklet_schedule (&dev->bh);
-       spin_unlock_irqrestore (&dev->done.lock, flags);
+               tasklet_schedule(&dev->bh);
+       spin_unlock_irqrestore(&dev->done.lock, flags);
 }
 
 /* some work can't be done in tasklets, so we use keventd
@@ -3120,7 +3119,7 @@ block:
                break;
        }
 
-       defer_bh (dev, skb);
+       defer_bh(dev, skb, &dev->rxq);
 
        if (urb) {
                if (netif_running (dev->net)
@@ -3490,7 +3489,7 @@ static void tx_complete (struct urb *urb, struct pt_regs *regs)
 
        urb->dev = NULL;
        entry->state = tx_done;
-       defer_bh (dev, skb);
+       defer_bh(dev, skb, &dev->txq);
 }
 
 /*-------------------------------------------------------------------------*/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4b929c3c1a983fc54ee5ecfbc04a672df09740a1..76c68851474c3f549cd966c932ffa000c48490e4 100644
@@ -204,7 +204,6 @@ struct sk_buff {
        struct sk_buff          *next;
        struct sk_buff          *prev;
 
-       struct sk_buff_head     *list;
        struct sock             *sk;
        struct timeval          stamp;
        struct net_device       *dev;
@@ -597,7 +596,6 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
 {
        struct sk_buff *prev, *next;
 
-       newsk->list = list;
        list->qlen++;
        prev = (struct sk_buff *)list;
        next = prev->next;
@@ -622,7 +620,6 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
 {
        struct sk_buff *prev, *next;
 
-       newsk->list = list;
        list->qlen++;
        next = (struct sk_buff *)list;
        prev = next->prev;
@@ -655,7 +652,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
                next->prev   = prev;
                prev->next   = next;
                result->next = result->prev = NULL;
-               result->list = NULL;
        }
        return result;
 }
@@ -664,7 +660,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 /*
  *     Insert a packet on a list.
  */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
@@ -672,24 +668,23 @@ static inline void __skb_insert(struct sk_buff *newsk,
        newsk->next = next;
        newsk->prev = prev;
        next->prev  = prev->next = newsk;
-       newsk->list = list;
        list->qlen++;
 }
 
 /*
  *     Place a packet after a given packet in a list.
  */
-extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk);
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+extern void       skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
-       __skb_insert(newsk, old, old->next, old->list);
+       __skb_insert(newsk, old, old->next, list);
 }
 
 /*
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void       skb_unlink(struct sk_buff *skb);
+extern void       skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
        struct sk_buff *next, *prev;
@@ -698,7 +693,6 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
        next       = skb->next;
        prev       = skb->prev;
        skb->next  = skb->prev = NULL;
-       skb->list  = NULL;
        next->prev = prev;
        prev->next = next;
 }
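
With the back-pointer removed, the locking contract becomes explicit at every call site: skb_unlink() takes the named list's lock itself (see the net/core/skbuff.c hunk below), while __skb_unlink() still assumes the caller already holds it. A minimal hypothetical caller:

	static void drop_queued_skb(struct sk_buff_head *q, struct sk_buff *skb)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		__skb_unlink(skb, q);	/* caller names the queue it locked */
		spin_unlock_irqrestore(&q->lock, flags);
		kfree_skb(skb);
	}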
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 181a3002d8adc5eebfc117e609ba4427ccf8ff2f..4b1faca5013ff8d573ef784253c77e867be1fcbc 100644
@@ -34,7 +34,6 @@
 
 void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
 {
-       struct sk_buff *skb;
        unsigned long flags;
        struct sk_buff *skb_from = (struct sk_buff *) from;
        struct sk_buff *skb_to = (struct sk_buff *) to;
@@ -47,8 +46,6 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
        prev->next = skb_to;
        to->prev->next = from->next;
        to->prev = from->prev;
-       for (skb = from->next; skb != skb_to; skb = skb->next)
-               skb->list = to;
        to->qlen += from->qlen;
        spin_unlock(&to->lock);
        from->prev = skb_from;
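
The loop deleted above was the only O(n) part of skb_migrate(): it existed solely to repoint each skb->list at the destination. Without the field, the splice is pure head/tail pointer surgery plus a qlen transfer. A hypothetical distillation, with locking elided (the real function takes both queue locks, as the surrounding context shows):

	static void splice_all(struct sk_buff_head *from, struct sk_buff_head *to)
	{
		if (from->next != (struct sk_buff *)from) {	/* source non-empty */
			from->prev->next = (struct sk_buff *)to;
			from->next->prev = to->prev;
			to->prev->next = from->next;
			to->prev = from->prev;
			to->qlen += from->qlen;

			/* reinitialize the now-empty source list */
			from->next = from->prev = (struct sk_buff *)from;
			from->qlen = 0;
		}
	}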
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 99694b57f6f565d36f6787f58c8faa43cced25d2..eb7343c10a9f859ec0f8e5390793fda4539e7106 100644
@@ -76,7 +76,7 @@ void ax25_requeue_frames(ax25_cb *ax25)
                if (skb_prev == NULL)
                        skb_queue_head(&ax25->write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &ax25->write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 096991cb09d91e9a7d747b529b5fa3ad58720c2a..e6564b0a683939c9a5f9773ddd1ae2de0a7cec44 100644
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
-       BUG_ON(skb->list != NULL);
-
        dst_release(skb->dst);
 #ifdef CONFIG_XFRM
        secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 #define C(x) n->x = skb->x
 
        n->next = n->prev = NULL;
-       n->list = NULL;
        n->sk = NULL;
        C(stamp);
        C(dev);
@@ -403,7 +400,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
         */
        unsigned long offset = new->data - old->data;
 
-       new->list       = NULL;
        new->sk         = NULL;
        new->dev        = old->dev;
        new->real_dev   = old->real_dev;
@@ -1342,50 +1338,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
        __skb_queue_tail(list, newsk);
        spin_unlock_irqrestore(&list->lock, flags);
 }
+
 /**
  *     skb_unlink      -       remove a buffer from a list
  *     @skb: buffer to remove
+ *     @list: list to use
  *
- *     Place a packet after a given packet in a list. The list locks are taken
- *     and this function is atomic with respect to other list locked calls
+ *     Remove a packet from a list. The list locks are taken and this
+ *     function is atomic with respect to other list locked calls
  *
- *     Works even without knowing the list it is sitting on, which can be
- *     handy at times. It also means that THE LIST MUST EXIST when you
- *     unlink. Thus a list must have its contents unlinked before it is
- *     destroyed.
+ *     You must know what list the SKB is on.
  */
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
-       struct sk_buff_head *list = skb->list;
-
-       if (list) {
-               unsigned long flags;
+       unsigned long flags;
 
-               spin_lock_irqsave(&list->lock, flags);
-               if (skb->list == list)
-                       __skb_unlink(skb, skb->list);
-               spin_unlock_irqrestore(&list->lock, flags);
-       }
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_unlink(skb, list);
+       spin_unlock_irqrestore(&list->lock, flags);
 }
 
-
 /**
  *     skb_append      -       append a buffer
  *     @old: buffer to insert after
  *     @newsk: buffer to insert
+ *     @list: list to use
  *
  *     Place a packet after a given packet in a list. The list locks are taken
  *     and this function is atomic with respect to other list locked calls.
  *     A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&old->list->lock, flags);
-       __skb_append(old, newsk);
-       spin_unlock_irqrestore(&old->list->lock, flags);
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_append(old, newsk, list);
+       spin_unlock_irqrestore(&list->lock, flags);
 }
 
 
@@ -1393,19 +1382,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
  *     skb_insert      -       insert a buffer
  *     @old: buffer to insert before
  *     @newsk: buffer to insert
+ *     @list: list to use
+ *
+ *     Place a packet before a given packet in a list. The list locks are
+ *     taken and this function is atomic with respect to other list locked
+ *     calls.
  *
- *     Place a packet before a given packet in a list. The list locks are taken
- *     and this function is atomic with respect to other list locked calls
  *     A buffer cannot be placed on two lists at the same time.
  */
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&old->list->lock, flags);
-       __skb_insert(newsk, old->prev, old, old->list);
-       spin_unlock_irqrestore(&old->list->lock, flags);
+       spin_lock_irqsave(&list->lock, flags);
+       __skb_insert(newsk, old->prev, old, list);
+       spin_unlock_irqrestore(&list->lock, flags);
 }
 
 #if 0
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acdd18e6adb2172903a2e6519e331c5489646330..0c30409fe9e5e2240a37835f670cc6d5f1105d82 100644
@@ -1763,7 +1763,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
                nskb = skb->next;
 
                if (skb->len == 0) {
-                       skb_unlink(skb);
+                       skb_unlink(skb, queue);
                        kfree_skb(skb);
                        /* 
                         * N.B. Don't refer to skb or cb after this point
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 8cce1fdbda907ac29dc29c2ea530102cc8f36d86..e0bebf4bbcadf60998f8d54f429af41eb4cfe08d 100644
@@ -479,7 +479,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
                xmit_count = cb2->xmit_count;
                segnum = cb2->segnum;
                /* Remove and drop ack'ed packet */
-               skb_unlink(ack);
+               skb_unlink(ack, q);
                kfree_skb(ack);
                ack = NULL;
 
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index de691e119e173fab412e8e90be336f9d3312e25f..b807a314269efede92cba696073faca001711bf7 100644
@@ -869,7 +869,7 @@ static void aun_tx_ack(unsigned long seq, int result)
 
 foundit:
        tx_result(skb->sk, eb->cookie, result);
-       skb_unlink(skb);
+       skb_unlink(skb, &aun_queue);
        spin_unlock_irqrestore(&aun_queue_lock, flags);
        kfree_skb(skb);
 }
@@ -947,7 +947,7 @@ static void ab_cleanup(unsigned long h)
                {
                        tx_result(skb->sk, eb->cookie, 
                                  ECTYPE_TRANSMIT_NOT_PRESENT);
-                       skb_unlink(skb);
+                       skb_unlink(skb, &aun_queue);
                        kfree_skb(skb);
                }
                skb = newskb;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 69b1fcf70077262e39737ce5f87bb8effb1e85e9..d2696af46c70edfe27bd1fba4868921b74b05c51 100644
@@ -975,7 +975,7 @@ do_fault:
        if (!skb->len) {
                if (sk->sk_send_head == skb)
                        sk->sk_send_head = NULL;
-               __skb_unlink(skb, skb->list);
+               __skb_unlink(skb, &sk->sk_write_queue);
                sk_stream_free_skb(sk, skb);
        }
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e6e6de3b13a05d691612c533f433c..ffa24025cd02236e1c8f789bddd6c273c9cf2ae2 100644
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
                        seq_rtt = now - scb->when;
                tcp_dec_pcount_approx(&tp->fackets_out, skb);
                tcp_packets_out_dec(tp, skb);
-               __skb_unlink(skb, skb->list);
+               __skb_unlink(skb, &sk->sk_write_queue);
                sk_stream_free_skb(sk, skb);
        }
 
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
                if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
                        SOCK_DEBUG(sk, "ofo packet was already received \n");
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &tp->out_of_order_queue);
                        __kfree_skb(skb);
                        continue;
                }
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *sk)
                           tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
                           TCP_SKB_CB(skb)->end_seq);
 
-               __skb_unlink(skb, skb->list);
+               __skb_unlink(skb, &tp->out_of_order_queue);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ drop:
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                if (seq == TCP_SKB_CB(skb1)->end_seq) {
-                       __skb_append(skb1, skb);
+                       __skb_append(skb1, skb, &tp->out_of_order_queue);
 
                        if (!tp->rx_opt.num_sacks ||
                            tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ drop:
                               tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
                               break;
                       }
-                      __skb_unlink(skb1, skb1->list);
+                      __skb_unlink(skb1, &tp->out_of_order_queue);
                       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
                       __kfree_skb(skb1);
                }
@@ -3088,8 +3088,9 @@ add_sack:
  * simplifies code)
  */
 static void
-tcp_collapse(struct sock *sk, struct sk_buff *head,
-            struct sk_buff *tail, u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+            struct sk_buff *head, struct sk_buff *tail,
+            u32 start, u32 end)
 {
        struct sk_buff *skb;
 
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
                /* No new bits? It is possible on ofo queue. */
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                        struct sk_buff *next = skb->next;
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, list);
                        __kfree_skb(skb);
                        NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                        skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
                nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
-               __skb_insert(nskb, skb->prev, skb, skb->list);
+               __skb_insert(nskb, skb->prev, skb, list);
                sk_stream_set_owner_r(nskb, sk);
 
                /* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
                        }
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                                struct sk_buff *next = skb->next;
-                               __skb_unlink(skb, skb->list);
+                               __skb_unlink(skb, list);
                                __kfree_skb(skb);
                                NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
                if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
-                       tcp_collapse(sk, head, skb, start, end);
+                       tcp_collapse(sk, &tp->out_of_order_queue,
+                                    head, skb, start, end);
                        head = skb;
                        if (skb == (struct sk_buff *)&tp->out_of_order_queue)
                                break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *sk)
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
-       tcp_collapse(sk, sk->sk_receive_queue.next,
+       tcp_collapse(sk, &sk->sk_receive_queue,
+                    sk->sk_receive_queue.next,
                     (struct sk_buff*)&sk->sk_receive_queue,
                     tp->copied_seq, tp->rcv_nxt);
        sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
                struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
                tp->copied_seq++;
                if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &sk->sk_receive_queue);
                        __kfree_skb(skb);
                }
        }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dd30dd137b74dd061d74af66500f3d3294be13cf..a4d1eb9a0926bdd1a31166ddd01f01bb7a9b0e72 100644
@@ -505,7 +505,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff);
+       __skb_append(skb, buff, &sk->sk_write_queue);
 
        return 0;
 }
@@ -893,7 +893,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
        /* Link BUFF into the send queue. */
        skb_header_release(buff);
-       __skb_append(skb, buff);
+       __skb_append(skb, buff, &sk->sk_write_queue);
 
        return 0;
 }
@@ -1238,7 +1238,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
                       tcp_skb_pcount(next_skb) != 1);
 
                /* Ok.  We will be able to collapse the packet. */
-               __skb_unlink(next_skb, next_skb->list);
+               __skb_unlink(next_skb, &sk->sk_write_queue);
 
                memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
 
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 6dafbb43b5296922f62cacf8c596d9987d835c0f..eb65b4925b51efa679ba957e1238f6131de43b0c 100644
@@ -988,9 +988,6 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
                        IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
                        return;
                }
-               /* Unlink tx_skb from list */
-               tx_skb->next = tx_skb->prev = NULL;
-               tx_skb->list = NULL;
 
                /* Clear old Nr field + poll bit */
                tx_skb->data[1] &= 0x0f;
@@ -1063,9 +1060,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
                        IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
                        return;
                }
-               /* Unlink tx_skb from list */
-               tx_skb->next = tx_skb->prev = NULL;
-               tx_skb->list = NULL;
 
                /* Clear old Nr field + poll bit */
                tx_skb->data[1] &= 0x0f;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 5de05a0bc0ffe11ab5e59e2edc93302d06df866f..8b5eefd70f03c3f686f14fc84947ac5e7ce90ab3 100644
@@ -78,7 +78,7 @@ void lapb_requeue_frames(struct lapb_cb *lapb)
                if (!skb_prev)
                        skb_queue_head(&lapb->write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &lapb->write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 20b4cfebd74ca543fc894b075802226d912011b3..f49b82da8264044cbda3252819fd9bb8f1021abe 100644
@@ -714,7 +714,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (uaddr)
                memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
        msg->msg_namelen = sizeof(*uaddr);
-       if (!skb->list) {
+       if (!skb->next) {
 dgram_free:
                kfree_skb(skb);
        }
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index eba812a9c69c6e0e0de271b5ec275d696da8e52d..571548619469f060b718a6d79e87b58303d7a229 100644
@@ -71,7 +71,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
 
        if (!ev->ind_prim && !ev->cfm_prim) {
                /* indicate or confirm not required */
-               if (!skb->list)
+               /* XXX this is not very pretty, perhaps we should store
+                * XXX indicate/confirm-needed state in the llc_conn_state_ev
+                * XXX control block of the SKB instead? -DaveM
+                */
+               if (!skb->next)
                        goto out_kfree_skb;
                goto out_skb_put;
        }
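
Without skb->list, "is this skb queued anywhere?" can no longer be read off the skb in general. The LLC code above gets away with testing skb->next because __skb_unlink() and __skb_dequeue() NULL out next/prev on removal, so an off-list skb always has skb->next == NULL. Restated as a tiny helper (the name is hypothetical):

	/* Valid only under the invariant "off-list => skb->next == NULL",
	 * which the unlink paths in include/linux/skbuff.h maintain.
	 */
	static inline int skb_on_a_list(const struct sk_buff *skb)
	{
		return skb->next != NULL;
	}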
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 0627347b14b88d7458b356e24aff9d051d464f75..252c1b3ecd78cb7ab03dcf607a222571e143f9b9 100644
@@ -77,7 +77,7 @@ void nr_requeue_frames(struct sock *sk)
                if (skb_prev == NULL)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7db7e1cedc3aed3e0cb07ebb5630df776cbb16ad..ae135e27799b0e8f59ff98cda99ffeaca0d5f095 100644
@@ -74,7 +74,7 @@ void rose_requeue_frames(struct sock *sk)
                if (skb_prev == NULL)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 091a66f06a35a7518f9f1d2ab8143d0631b6efec..4454afe4727ef2bb95778a5a8193c8b74df6dcd9 100644
@@ -4892,7 +4892,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
        sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
                event = sctp_skb2event(skb);
                if (event->asoc == assoc) {
-                       __skb_unlink(skb, skb->list);
+                       __skb_unlink(skb, &oldsk->sk_receive_queue);
                        __skb_queue_tail(&newsk->sk_receive_queue, skb);
                }
        }
@@ -4921,7 +4921,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
                sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
                        event = sctp_skb2event(skb);
                        if (event->asoc == assoc) {
-                               __skb_unlink(skb, skb->list);
+                               __skb_unlink(skb, &oldsp->pd_lobby);
                                __skb_queue_tail(queue, skb);
                        }
                }
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 8bbc279d6c99378934a8f4d2fb3943a04bd70143..ec2c857eae7fecfd6800940975c6f91d4741d995 100644
@@ -50,9 +50,9 @@
 
 /* Forward declarations for internal helpers.  */
 static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-                                               struct sctp_ulpevent *);
+                                             struct sctp_ulpevent *);
 static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
-                                               struct sctp_ulpevent *);
+                                             struct sctp_ulpevent *);
 
 /* 1st Level Abstractions */
 
@@ -125,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                event = sctp_ulpq_order(ulpq, event);
        }
 
-       /* Send event to the ULP.  */
+       /* Send event to the ULP.  'event' is the sctp_ulpevent for the
+        * very first SKB on the 'temp' list.
+        */
        if (event)
                sctp_ulpq_tail_event(ulpq, event);
 
@@ -158,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
        return sctp_clear_pd(ulpq->asoc->base.sk);
 }
 
-
-
+/* If the SKB of 'event' is on a list, it is the first such member
+ * of that list.
+ */
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
        struct sock *sk = ulpq->asoc->base.sk;
-       struct sk_buff_head *queue;
+       struct sk_buff_head *queue, *skb_list;
+       struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;
 
+       skb_list = (struct sk_buff_head *) skb->prev;
+
        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
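
The cast of skb->prev above leans on sk_buff_head and sk_buff sharing
their first two members: a queue's head is itself a valid list node, so
for the first skb on a list, skb->prev points at the sk_buff_head.  The
layout this depends on (from include/linux/skbuff.h):

    struct sk_buff_head {
            /* These two members must be first. */
            struct sk_buff  *next;
            struct sk_buff  *prev;

            __u32           qlen;
            spinlock_t      lock;
    };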
@@ -197,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
-       if (sctp_event2skb(event)->list)
-               sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
+       if (skb_list)
+               sctp_skb_list_tail(skb_list, queue);
        else
-               __skb_queue_tail(queue, sctp_event2skb(event));
+               __skb_queue_tail(queue, skb);
 
        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
@@ -214,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
        return 1;
 
 out_free:
-       if (sctp_event2skb(event)->list)
-               sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
+       if (skb_list)
+               sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);
+
        return 0;
 }
 
@@ -269,7 +276,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
        struct sk_buff *pos;
        struct sctp_ulpevent *event;
@@ -294,7 +301,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
                skb_shinfo(f_frag)->frag_list = pos;
 
        /* Remove the first fragment from the reassembly queue.  */
-       __skb_unlink(f_frag, f_frag->list);
+       __skb_unlink(f_frag, queue);
        while (pos) {
 
                pnext = pos->next;
@@ -304,7 +311,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
                f_frag->data_len += pos->len;
 
                /* Remove the fragment from the reassembly queue.  */
-               __skb_unlink(pos, pos->list);
+               __skb_unlink(pos, queue);
        
                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
@@ -375,7 +382,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 done:
        return retval;
 found:
-       retval = sctp_make_reassembled_event(first_frag, pos);
+       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
@@ -435,7 +442,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(first_frag, last_frag);
+       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;
 
@@ -527,7 +534,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
         * further.
         */
 done:
-       retval = sctp_make_reassembled_event(first_frag, last_frag);
+       retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
        return retval;
 }
 
@@ -537,6 +544,7 @@ done:
 static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *event)
 {
+       struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
@@ -547,6 +555,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;
 
+       event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
@@ -567,10 +577,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);
 
-               __skb_unlink(pos, pos->list);
+               __skb_unlink(pos, &ulpq->lobby);
 
                /* Attach all gathered skbs to the event.  */
-               __skb_queue_tail(sctp_event2skb(event)->list, pos);
+               __skb_queue_tail(event_list, pos);
        }
 }
 
@@ -626,7 +636,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 }
 
 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
-                                               struct sctp_ulpevent *event)
+                                            struct sctp_ulpevent *event)
 {
        __u16 sid, ssn;
        struct sctp_stream *in;
@@ -667,7 +677,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 {
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
-       struct sctp_ulpevent *event = NULL;
+       struct sctp_ulpevent *event;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        __u16 csid, cssn;
@@ -675,6 +685,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
        in  = &ulpq->asoc->ssnmap->in;
 
        /* We are holding the chunks by stream, by SSN.  */
+       skb_queue_head_init(&temp);
+       event = NULL;
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
@@ -686,19 +698,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
                /* Found it, so mark in the ssnmap. */         
                sctp_ssn_next(in, csid);
 
-               __skb_unlink(pos, pos->list);
+               __skb_unlink(pos, &ulpq->lobby);
                if (!event) {                                           
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);
-                       skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));
                } else {
                        /* Attach all gathered skbs to the event.  */
-                       __skb_queue_tail(sctp_event2skb(event)->list, pos);
+                       __skb_queue_tail(&temp, pos);
                }
        }
 
-       /* Send event to the ULP.  */
+       /* Send event to the ULP.  'event' is the sctp_ulpevent for
+        * the very first SKB on the 'temp' list.
+        */
        if (event)
                sctp_ulpq_tail_event(ulpq, event);
 }
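
Note the hoisted skb_queue_head_init() in the hunk above: the else
branch now queues straight onto &temp rather than through the removed
skb->list back-pointer, so the local list is set up once, before the
walk, instead of lazily inside the !event branch.  A sketch of the
resulting collect-then-deliver shape, with the loop body elided:

    struct sk_buff_head temp;

    skb_queue_head_init(&temp);        /* always, before the walk */
    /* ... __skb_unlink() each ready skb from the lobby and
     *     __skb_queue_tail(&temp, skb) ... */
    if (event)      /* 'event' wraps the first skb on 'temp' */
            sctp_ulpq_tail_event(ulpq, event);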
index 4bd95c8f5934a5ca77d265bfdf01e046339ca634..46252d2807bb6fd5c491488328955dac2d9f6d3f 100644 (file)
@@ -286,16 +286,16 @@ void unix_gc(void)
                        skb = skb_peek(&s->sk_receive_queue);
                        while (skb &&
                               skb != (struct sk_buff *)&s->sk_receive_queue) {
-                               nextsk=skb->next;
+                               nextsk = skb->next;
                                /*
                                 *      Do we have file descriptors ?
                                 */
-                               if(UNIXCB(skb).fp)
-                               {
-                                       __skb_unlink(skb, skb->list);
-                                       __skb_queue_tail(&hitlist,skb);
+                               if (UNIXCB(skb).fp) {
+                                       __skb_unlink(skb,
+                                                    &s->sk_receive_queue);
+                                       __skb_queue_tail(&hitlist, skb);
                                }
-                               skb=nextsk;
+                               skb = nextsk;
                        }
                        spin_unlock(&s->sk_receive_queue.lock);
                }
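
The garbage collector's hand-rolled walk terminates by comparing the
cursor against the queue head cast to an skb, valid for the same layout
reason as above: queues are circular and the head acts as a sentinel
node.  A sketch of the idiom with hypothetical names (should_move,
queue and hitlist are illustrative, not from this patch):

    struct sk_buff *skb = skb_peek(&queue);

    while (skb && skb != (struct sk_buff *)&queue) {
            struct sk_buff *next = skb->next;  /* save before unlink */

            if (should_move(skb)) {            /* hypothetical test */
                    __skb_unlink(skb, &queue);
                    __skb_queue_tail(&hitlist, skb);
            }
            skb = next;
    }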
index 7fd872ad0c20a2acbb709e5cb8ca9c22bae7ebac..e20cfadad4d99d7043d6200a012c182454ae4041 100644 (file)
@@ -80,7 +80,7 @@ void x25_requeue_frames(struct sock *sk)
                if (!skb_prev)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
-                       skb_append(skb_prev, skb);
+                       skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
 }