[NET]: Transform skb_queue_len() binary tests into skb_queue_empty()
author David S. Miller <davem@davemloft.net>
Fri, 8 Jul 2005 21:57:23 +0000 (14:57 -0700)
committer David S. Miller <davem@davemloft.net>
Fri, 8 Jul 2005 21:57:23 +0000 (14:57 -0700)
This is part of the grand scheme to eliminate the qlen
member of skb_queue_head, and subsequently remove the
'list' member of sk_buff.

Most users of skb_queue_len() only want to know whether the
queue is empty, and that is trivially done with skb_queue_empty(),
which doesn't use the skb_queue_head->qlen member and instead
tests the emptiness of the queue's list.

Signed-off-by: David S. Miller <davem@davemloft.net>
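
For reference, the two helpers involved are defined in include/linux/skbuff.h; at the
time of this commit they looked roughly like the sketch below (simplified, not the
verbatim header). skb_queue_empty() only compares the list head pointers, while
skb_queue_len() reads the qlen counter that this series is working to remove:

        static inline int skb_queue_empty(const struct sk_buff_head *list)
        {
                /* An empty queue's head points back at itself; no qlen access. */
                return list->next == (struct sk_buff *)list;
        }

        static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
        {
                /* Reads the counter this patch series wants to eliminate. */
                return list_->qlen;
        }

The mechanical conversion applied throughout is therefore:

        if (skb_queue_len(&q))         ->   if (!skb_queue_empty(&q))
        if (skb_queue_len(&q) == 0)    ->   if (skb_queue_empty(&q))

Callers that used the actual count (such as the statistics bumps in tcp.c, tcp_input.c
and tcp_timer.c) are switched to the single-increment NET_INC_STATS_* variants instead.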
34 files changed:
drivers/bluetooth/hci_vhci.c
drivers/isdn/hisax/isdnl1.c
drivers/isdn/hisax/isdnl2.c
drivers/isdn/hisax/isdnl3.c
drivers/isdn/i4l/isdn_tty.c
drivers/isdn/icn/icn.c
drivers/net/hamradio/scc.c
drivers/net/ppp_async.c
drivers/net/ppp_generic.c
drivers/net/ppp_synctty.c
drivers/net/tun.c
drivers/net/wireless/airo.c
drivers/s390/net/claw.c
drivers/s390/net/ctctty.c
drivers/usb/net/usbnet.c
include/net/irda/irda_device.h
include/net/tcp.h
net/bluetooth/cmtp/core.c
net/bluetooth/hidp/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/rfcomm/tty.c
net/decnet/af_decnet.c
net/decnet/dn_nsp_out.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_timer.c
net/irda/irlap.c
net/irda/irlap_event.c
net/irda/irlap_frame.c
net/irda/irttp.c
net/llc/llc_c_ev.c
net/netlink/af_netlink.c
net/sched/sch_red.c
net/unix/af_unix.c

diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 3256192dcde8f72759f0aab798b03cb9739b5a55..f9b956fb2b8b81ac774cb772b21d06aee58a0a7f 100644
@@ -120,7 +120,7 @@ static unsigned int hci_vhci_chr_poll(struct file *file, poll_table * wait)
 
        poll_wait(file, &hci_vhci->read_wait, wait);
  
-       if (skb_queue_len(&hci_vhci->readq))
+       if (!skb_queue_empty(&hci_vhci->readq))
                return POLLIN | POLLRDNORM;
 
        return POLLOUT | POLLWRNORM;
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c
index ac899503a74f1ee49b117de3b0d12d670a5a7ab0..bab356886483e257b7c718e4bde5fe6c7e3ae9f7 100644
@@ -279,7 +279,8 @@ BChannel_proc_xmt(struct BCState *bcs)
        if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags))
                st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
        if (!test_bit(BC_FLG_ACTIV, &bcs->Flag)) {
-               if (!test_bit(BC_FLG_BUSY, &bcs->Flag) && (!skb_queue_len(&bcs->squeue))) {
+               if (!test_bit(BC_FLG_BUSY, &bcs->Flag) &&
+                   skb_queue_empty(&bcs->squeue)) {
                        st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL);
                }
        }
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 9022583fd6a06e97b76a6c44dddcc0b7d7eacf8b..1615c1a76ab8ace39682e503a41059f2de671d18 100644
@@ -108,7 +108,8 @@ static int l2addrsize(struct Layer2 *l2);
 static void
 set_peer_busy(struct Layer2 *l2) {
        test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
-       if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
+       if (!skb_queue_empty(&l2->i_queue) ||
+           !skb_queue_empty(&l2->ui_queue))
                test_and_set_bit(FLG_L2BLOCK, &l2->flag);
 }
 
@@ -754,7 +755,7 @@ l2_restart_multi(struct FsmInst *fi, int event, void *arg)
                st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
 
        if ((ST_L2_7==state) || (ST_L2_8 == state))
-               if (skb_queue_len(&st->l2.i_queue) && cansend(st))
+               if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
                        st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }
 
@@ -810,7 +811,7 @@ l2_connected(struct FsmInst *fi, int event, void *arg)
        if (pr != -1)
                st->l2.l2l3(st, pr, NULL);
 
-       if (skb_queue_len(&st->l2.i_queue) && cansend(st))
+       if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
                st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }
 
@@ -1014,7 +1015,7 @@ l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
                        if(typ != RR) FsmDelTimer(&st->l2.t203, 9);
                        restart_t200(st, 12);
                }
-               if (skb_queue_len(&st->l2.i_queue) && (typ == RR))
+               if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
                        st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
        } else
                nrerrorrecovery(fi);
@@ -1120,7 +1121,7 @@ l2_got_iframe(struct FsmInst *fi, int event, void *arg)
                return;
        }
 
-       if (skb_queue_len(&st->l2.i_queue) && (fi->state == ST_L2_7))
+       if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
                st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
        if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
                enquiry_cr(st, RR, RSP, 0);
@@ -1138,7 +1139,7 @@ l2_got_tei(struct FsmInst *fi, int event, void *arg)
                test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
        } else
                FsmChangeState(fi, ST_L2_4);
-       if (skb_queue_len(&st->l2.ui_queue))
+       if (!skb_queue_empty(&st->l2.ui_queue))
                tx_ui(st);
 }
 
@@ -1301,7 +1302,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
                FsmDelTimer(&st->l2.t203, 13);
                FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
        }
-       if (skb_queue_len(&l2->i_queue) && cansend(st))
+       if (!skb_queue_empty(&l2->i_queue) && cansend(st))
                st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }
 
@@ -1347,7 +1348,7 @@ l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
                        }
                        invoke_retransmission(st, nr);
                        FsmChangeState(fi, ST_L2_7);
-                       if (skb_queue_len(&l2->i_queue) && cansend(st))
+                       if (!skb_queue_empty(&l2->i_queue) && cansend(st))
                                st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
                } else
                        nrerrorrecovery(fi);
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index abcc9530eb347b007b74ea1f5a774c361e2c9902..c9917cd2132b74e0fb311447aaa66c1b03cdd395 100644
@@ -302,7 +302,7 @@ release_l3_process(struct l3_process *p)
                                !test_bit(FLG_PTP, &p->st->l2.flag)) {
                                if (p->debug)
                                        l3_debug(p->st, "release_l3_process: last process");
-                               if (!skb_queue_len(&p->st->l3.squeue)) {
+                               if (skb_queue_empty(&p->st->l3.squeue)) {
                                        if (p->debug)
                                                l3_debug(p->st, "release_l3_process: release link");
                                        if (p->st->protocol != ISDN_PTYPE_NI1)
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index ad5aa38fb5a6eb6001b4b97d1f8154037f3836e3..b37ef1f06b3dffa55e36c313f7dd7238a1b6ceb0 100644
@@ -1223,7 +1223,7 @@ isdn_tty_write(struct tty_struct *tty, const u_char * buf, int count)
                total += c;
        }
        atomic_dec(&info->xmit_lock);
-       if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue))) {
+       if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) {
                if (m->mdmreg[REG_DXMT] & BIT_DXMT) {
                        isdn_tty_senddown(info);
                        isdn_tty_tint(info);
@@ -1284,7 +1284,7 @@ isdn_tty_flush_chars(struct tty_struct *tty)
 
        if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_flush_chars"))
                return;
-       if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue)))
+       if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue))
                isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, 1);
 }
 
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 9fc0c1e037321d6a295165e731cfc41a5b54cbd4..e0d1b01cc74c5b5badfa64cd2ca1a4a1457b7c68 100644
@@ -304,12 +304,12 @@ icn_pollbchan_send(int channel, icn_card * card)
        isdn_ctrl cmd;
 
        if (!(card->sndcount[channel] || card->xskb[channel] ||
-             skb_queue_len(&card->spqueue[channel])))
+             !skb_queue_empty(&card->spqueue[channel])))
                return;
        if (icn_trymaplock_channel(card, mch)) {
                while (sbfree && 
                       (card->sndcount[channel] ||
-                       skb_queue_len(&card->spqueue[channel]) ||
+                       !skb_queue_empty(&card->spqueue[channel]) ||
                        card->xskb[channel])) {
                        spin_lock_irqsave(&card->lock, flags);
                        if (card->xmit_lock[channel]) {
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index ece1b1a1318632da085d4e0a855bf4c2cb7a5e7f..c27e417f32bf5958e322d4277dc7a808c2777284 100644
@@ -304,7 +304,7 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
                scc->tx_buff = NULL;
        }
        
-       while (skb_queue_len(&scc->tx_queue))
+       while (!skb_queue_empty(&scc->tx_queue))
                dev_kfree_skb(skb_dequeue(&scc->tx_queue));
 
        spin_unlock_irqrestore(&scc->lock, flags);
@@ -1126,8 +1126,7 @@ static void t_dwait(unsigned long channel)
        
        if (scc->stat.tx_state == TXS_WAIT)     /* maxkeyup or idle timeout */
        {
-               if (skb_queue_len(&scc->tx_queue) == 0) /* nothing to send */
-               {
+               if (skb_queue_empty(&scc->tx_queue)) {  /* nothing to send */
                        scc->stat.tx_state = TXS_IDLE;
                        netif_wake_queue(scc->dev);     /* t_maxkeyup locked it. */
                        return;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 5e48b9ab30455ca661f3c03c6bb128910778d22c..59e8183c639e0be7e9a07dc9c8295d5bd78281e6 100644
@@ -364,7 +364,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_async_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
-       if (skb_queue_len(&ap->rqueue))
+       if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        ap_put(ap);
        if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index ab726ab43798811bd28d76c293f4fc3564dff75a..a32668e88e09fc2a157e3ac5500c94ae158c9529 100644
@@ -1237,8 +1237,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
                pch = list_entry(list, struct channel, clist);
                navail += pch->avail = (pch->chan != NULL);
                if (pch->avail) {
-                       if (skb_queue_len(&pch->file.xq) == 0
-                           || !pch->had_frag) {
+                       if (skb_queue_empty(&pch->file.xq) ||
+                           !pch->had_frag) {
                                pch->avail = 2;
                                ++nfree;
                        }
@@ -1374,8 +1374,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 
                /* try to send it down the channel */
                chan = pch->chan;
-               if (skb_queue_len(&pch->file.xq)
-                   || !chan->ops->start_xmit(chan, frag))
+               if (!skb_queue_empty(&pch->file.xq) ||
+                   !chan->ops->start_xmit(chan, frag))
                        skb_queue_tail(&pch->file.xq, frag);
                pch->had_frag = 1;
                p += flen;
@@ -1412,7 +1412,7 @@ ppp_channel_push(struct channel *pch)
 
        spin_lock_bh(&pch->downl);
        if (pch->chan != 0) {
-               while (skb_queue_len(&pch->file.xq) > 0) {
+               while (!skb_queue_empty(&pch->file.xq)) {
                        skb = skb_dequeue(&pch->file.xq);
                        if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
                                /* put the packet back and try again later */
@@ -1426,7 +1426,7 @@ ppp_channel_push(struct channel *pch)
        }
        spin_unlock_bh(&pch->downl);
        /* see if there is anything from the attached unit to be sent */
-       if (skb_queue_len(&pch->file.xq) == 0) {
+       if (skb_queue_empty(&pch->file.xq)) {
                read_lock_bh(&pch->upl);
                ppp = pch->ppp;
                if (ppp != 0)
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index fd9f50180355b79bef8ae07a7fe44b68621a59f5..4d51c0c8023d160b5d82c3d09e37e440fe0e16bf 100644
@@ -406,7 +406,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_sync_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
-       if (skb_queue_len(&ap->rqueue))
+       if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        sp_put(ap);
        if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7bfee366297b655dd76c3d3387835775321e87a7..effab0b9adca176b6b6a3b9fd553b256d9f1c850 100644
@@ -215,7 +215,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 
        poll_wait(file, &tun->read_wait, wait);
  
-       if (skb_queue_len(&tun->readq))
+       if (!skb_queue_empty(&tun->readq))
                mask |= POLLIN | POLLRDNORM;
 
        return mask;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index c12648d8192b7dd7df2ffbbe7bbb68ccd87a30cb..47f3c5d0203da70e394d82509ab69fdea3c06cfa 100644
@@ -2374,7 +2374,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
        /*
         * Clean out tx queue
         */
-       if (test_bit(FLAG_MPI, &ai->flags) && skb_queue_len (&ai->txq) > 0) {
+       if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) {
                struct sk_buff *skb = NULL;
                for (;(skb = skb_dequeue(&ai->txq));)
                        dev_kfree_skb(skb);
@@ -3287,7 +3287,7 @@ exitrx:
                                if (status & EV_TXEXC)
                                        get_tx_error(apriv, -1);
                                spin_lock_irqsave(&apriv->aux_lock, flags);
-                               if (skb_queue_len (&apriv->txq)) {
+                               if (!skb_queue_empty(&apriv->txq)) {
                                        spin_unlock_irqrestore(&apriv->aux_lock,flags);
                                        mpi_send_packet (dev);
                                } else {
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 60440dbe3a2764168056bf2b48acbaaf4d64cecb..24c0af49c25c8f416615b4a009af21e6045b5043 100644
@@ -428,7 +428,7 @@ claw_pack_skb(struct claw_privbk *privptr)
        new_skb = NULL;         /* assume no dice */
        pkt_cnt = 0;
        CLAW_DBF_TEXT(4,trace,"PackSKBe");
-       if (skb_queue_len(&p_ch->collect_queue) > 0) {
+       if (!skb_queue_empty(&p_ch->collect_queue)) {
        /* some data */
                held_skb = skb_dequeue(&p_ch->collect_queue);
                if (p_env->packing != DO_PACKED)
@@ -1254,7 +1254,7 @@ claw_write_next ( struct chbk * p_ch )
        privptr = (struct claw_privbk *) dev->priv;
         claw_free_wrt_buf( dev );
        if ((privptr->write_free_count > 0) &&
-           (skb_queue_len(&p_ch->collect_queue) > 0)) {
+           !skb_queue_empty(&p_ch->collect_queue)) {
                pk_skb = claw_pack_skb(privptr);
                while (pk_skb != NULL) {
                        rc = claw_hw_tx( pk_skb, dev,1);
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c
index 3080393e823db43117d22a6bd73adf7cfdcc6013..968f2c113efeefa2bf627f39bc711c32e4dbfff3 100644
@@ -156,7 +156,7 @@ ctc_tty_readmodem(ctc_tty_info *info)
                                        skb_queue_head(&info->rx_queue, skb);
                                else {
                                        kfree_skb(skb);
-                                       ret = skb_queue_len(&info->rx_queue);
+                                       ret = !skb_queue_empty(&info->rx_queue);
                                }
                        }
                }
@@ -530,7 +530,7 @@ ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
                total += c;
                count -= c;
        }
-       if (skb_queue_len(&info->tx_queue)) {
+       if (!skb_queue_empty(&info->tx_queue)) {
                info->lsr &= ~UART_LSR_TEMT;
                tasklet_schedule(&info->tasklet);
        }
@@ -594,7 +594,7 @@ ctc_tty_flush_chars(struct tty_struct *tty)
                return;
        if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
                return;
-       if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue)))
+       if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue))
                return;
        tasklet_schedule(&info->tasklet);
 }
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c
index 8a945f4f36939d35bc8b5d541b8e9d0b7cb9be06..576f3b852fce39ccfff151704c1447b883e1703f 100644
@@ -3227,9 +3227,9 @@ static int usbnet_stop (struct net_device *net)
        temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
 
        // maybe wait for deletions to finish.
-       while (skb_queue_len (&dev->rxq)
-                       && skb_queue_len (&dev->txq)
-                       && skb_queue_len (&dev->done)) {
+       while (!skb_queue_empty(&dev->rxq) &&
+              !skb_queue_empty(&dev->txq) &&
+              !skb_queue_empty(&dev->done)) {
                msleep(UNLINK_TIMEOUT_MS);
                if (netif_msg_ifdown (dev))
                        devdbg (dev, "waited for %d urb completions", temp);
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
index 71d6af83b631702d3053e77814ea0b2eec250f29..92c828029cd8d46d8a753e3f7a3c54882572be89 100644
@@ -224,7 +224,7 @@ int  irda_device_is_receiving(struct net_device *dev);
 /* Interface for internal use */
 static inline int irda_device_txqueue_empty(const struct net_device *dev)
 {
-       return (skb_queue_len(&dev->qdisc->q) == 0);
+       return skb_queue_empty(&dev->qdisc->q);
 }
 int  irda_device_set_raw_mode(struct net_device* self, int status);
 struct net_device *alloc_irdadev(int sizeof_priv);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a166918ca56d2f36f80f21d7678dd92b86c88454..4d5b12e4dc11a099ae264864d573f4c78fbf7dae 100644
@@ -991,7 +991,7 @@ static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
 
 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
 {
-       if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
+       if (skb_queue_empty(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 2e341de3e7634fda2ff7ee7bf912cc56f8003643..901eff7ebe74072d0c7f15df3ad5a420248b85a5 100644
@@ -213,7 +213,7 @@ static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, in
        return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static int cmtp_process_transmit(struct cmtp_session *session)
+static void cmtp_process_transmit(struct cmtp_session *session)
 {
        struct sk_buff *skb, *nskb;
        unsigned char *hdr;
@@ -223,7 +223,7 @@ static int cmtp_process_transmit(struct cmtp_session *session)
 
        if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
                BT_ERR("Can't allocate memory for new frame");
-               return -ENOMEM;
+               return;
        }
 
        while ((skb = skb_dequeue(&session->transmit))) {
@@ -275,8 +275,6 @@ static int cmtp_process_transmit(struct cmtp_session *session)
        cmtp_send_frame(session, nskb->data, nskb->len);
 
        kfree_skb(nskb);
-
-       return skb_queue_len(&session->transmit);
 }
 
 static int cmtp_session(void *arg)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index affbc55462e860f8a525ac9cecac09ca2cdf9ef9..de8af5f423942750c9677dd86f461366b9f56923 100644
@@ -428,7 +428,7 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
        return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static int hidp_process_transmit(struct hidp_session *session)
+static void hidp_process_transmit(struct hidp_session *session)
 {
        struct sk_buff *skb;
 
@@ -453,9 +453,6 @@ static int hidp_process_transmit(struct hidp_session *session)
                hidp_set_timer(session);
                kfree_skb(skb);
        }
-
-       return skb_queue_len(&session->ctrl_transmit) +
-                               skb_queue_len(&session->intr_transmit);
 }
 
 static int hidp_session(void *arg)
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index f3f6355a2786c78eefe8213fe95dd67cfb93b31f..63a123c5c41b38dedef853b56c44ac4f55d55d16 100644
@@ -590,8 +590,11 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
 
-               if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
-                               signal_pending(current) || !timeo)
+               if (!skb_queue_empty(&sk->sk_receive_queue) ||
+                   sk->sk_err ||
+                   (sk->sk_shutdown & RCV_SHUTDOWN) ||
+                   signal_pending(current) ||
+                   !timeo)
                        break;
 
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 6d689200bcf3e9c68113b964b610668d6c631f68..6304590fd36a5f6d132f8a6262d5acdb616c0ebf 100644
@@ -781,7 +781,7 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
 
        BT_DBG("tty %p dev %p", tty, dev);
 
-       if (skb_queue_len(&dlc->tx_queue))
+       if (!skb_queue_empty(&dlc->tx_queue))
                return dlc->mtu;
 
        return 0;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 29bb3cd219655bb93e1fe6bc13d82eb40f4f9654..96a02800cd283648e7d92d24c05e863ac4706353 100644
@@ -536,7 +536,7 @@ static void dn_keepalive(struct sock *sk)
         * we are double checking that we are not sending too
         * many of these keepalive frames.
         */
-       if (skb_queue_len(&scp->other_xmit_queue) == 0)
+       if (skb_queue_empty(&scp->other_xmit_queue))
                dn_nsp_send_link(sk, DN_NOCHANGE, 0);
 }
 
@@ -1191,7 +1191,7 @@ static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table
        struct dn_scp *scp = DN_SK(sk);
        int mask = datagram_poll(file, sock, wait);
 
-       if (skb_queue_len(&scp->other_receive_queue))
+       if (!skb_queue_empty(&scp->other_receive_queue))
                mask |= POLLRDBAND;
 
        return mask;
@@ -1214,7 +1214,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 
        case SIOCATMARK:
                lock_sock(sk);
-               val = (skb_queue_len(&scp->other_receive_queue) != 0);
+               val = !skb_queue_empty(&scp->other_receive_queue);
                if (scp->state != DN_RUN)
                        val = -ENOTCONN;
                release_sock(sk);
@@ -1630,7 +1630,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
        int len = 0;
 
        if (flags & MSG_OOB)
-               return skb_queue_len(q) ? 1 : 0;
+               return !skb_queue_empty(q) ? 1 : 0;
 
        while(skb != (struct sk_buff *)q) {
                struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -1707,7 +1707,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
                if (sk->sk_err)
                        goto out;
 
-               if (skb_queue_len(&scp->other_receive_queue)) {
+               if (!skb_queue_empty(&scp->other_receive_queue)) {
                        if (!(flags & MSG_OOB)) {
                                msg->msg_flags |= MSG_OOB;
                                if (!scp->other_report) {
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 42abbf3f524f671b2a94307c216c38ff31f215bc..8cce1fdbda907ac29dc29c2ea530102cc8f36d86 100644
@@ -342,7 +342,8 @@ int dn_nsp_xmit_timeout(struct sock *sk)
 
        dn_nsp_output(sk);
 
-       if (skb_queue_len(&scp->data_xmit_queue) || skb_queue_len(&scp->other_xmit_queue))
+       if (!skb_queue_empty(&scp->data_xmit_queue) ||
+           !skb_queue_empty(&scp->other_xmit_queue))
                scp->persist = dn_nsp_persist(sk);
 
        return 0;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 29894c74916336afd45dcd30f43e2ef2b34e3cfc..ddb6ce4ecff291e9ecec53e86a2781eefe61a3bb 100644
@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);
 
-       NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+       NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
 
        /* RX process wants to run with disabled BHs, though it is not
         * necessary */
@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                         * is not empty. It is more elegant, but eats cycles,
                         * unfortunately.
                         */
-                       if (skb_queue_len(&tp->ucopy.prequeue))
+                       if (!skb_queue_empty(&tp->ucopy.prequeue))
                                goto do_prequeue;
 
                        /* __ Set realtime policy in scheduler __ */
@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        }
 
                        if (tp->rcv_nxt == tp->copied_seq &&
-                           skb_queue_len(&tp->ucopy.prequeue)) {
+                           !skb_queue_empty(&tp->ucopy.prequeue)) {
 do_prequeue:
                                tcp_prequeue_process(sk);
 
@@ -1476,7 +1476,7 @@ skip_copy:
        } while (len > 0);
 
        if (user_recv) {
-               if (skb_queue_len(&tp->ucopy.prequeue)) {
+               if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                        int chunk;
 
                        tp->ucopy.len = copied > 0 ? len : 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8de2f1071c2bf5dd161026ff75da87227b1f73ec..53a8a5399f1e6e6de3b13a05d691612c533f433c 100644
@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
        int this_sack;
 
        /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-       if (skb_queue_len(&tp->out_of_order_queue) == 0) {
+       if (skb_queue_empty(&tp->out_of_order_queue)) {
                tp->rx_opt.num_sacks = 0;
                tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
                return;
@@ -2935,13 +2935,13 @@ queue_and_out:
                if(th->fin)
                        tcp_fin(skb, sk, th);
 
-               if (skb_queue_len(&tp->out_of_order_queue)) {
+               if (!skb_queue_empty(&tp->out_of_order_queue)) {
                        tcp_ofo_queue(sk);
 
                        /* RFC2581. 4.2. SHOULD send immediate ACK, when
                         * gap in queue is filled.
                         */
-                       if (!skb_queue_len(&tp->out_of_order_queue))
+                       if (skb_queue_empty(&tp->out_of_order_queue))
                                tp->ack.pingpong = 0;
                }
 
@@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk)
         * This must not ever occur. */
 
        /* First, purge the out_of_order queue. */
-       if (skb_queue_len(&tp->out_of_order_queue)) {
-               NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED, 
-                                skb_queue_len(&tp->out_of_order_queue));
+       if (!skb_queue_empty(&tp->out_of_order_queue)) {
+               NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
                __skb_queue_purge(&tp->out_of_order_queue);
 
                /* Reset SACK state.  A conforming SACK implementation will
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b127b4498565f1801444d11abe92cefd6c1c5c54..0084227438c28d26bc2d089b1facc4675310f741 100644
@@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data)
        }
        tp->ack.pending &= ~TCP_ACK_TIMER;
 
-       if (skb_queue_len(&tp->ucopy.prequeue)) {
+       if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                struct sk_buff *skb;
 
-               NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED, 
-                                skb_queue_len(&tp->ucopy.prequeue));
+               NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
 
                while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk->sk_backlog_rcv(sk, skb);
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 046ad0750e48d4282e0501f3e1d22387caa3fa6d..7029618f5719fa6659bd096c7d7a0345b29dff70 100644
@@ -445,9 +445,8 @@ void irlap_disconnect_request(struct irlap_cb *self)
        IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
 
        /* Don't disconnect until all data frames are successfully sent */
-       if (skb_queue_len(&self->txq) > 0) {
+       if (!skb_queue_empty(&self->txq)) {
                self->disconnect_pending = TRUE;
-
                return;
        }
 
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 1cd89f5f3b759320f6472fb06d83800e871be82f..a505b5457608097238f8da41cdd3b6318e243425 100644
@@ -191,7 +191,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
         * Send out the RR frames faster if our own transmit queue is empty, or
         * if the peer is busy. The effect is a much faster conversation
         */
-       if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) {
+       if (skb_queue_empty(&self->txq) || self->remote_busy) {
                if (self->fast_RR == TRUE) {
                        /*
                         *  Assert that the fast poll timer has not reached the
@@ -263,7 +263,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
                IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__,
                           skb_queue_len(&self->txq));
 
-               if (skb_queue_len(&self->txq)) {
+               if (!skb_queue_empty(&self->txq)) {
                        /* Prevent race conditions with irlap_data_request() */
                        self->local_busy = TRUE;
 
@@ -1074,7 +1074,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
 #else  /* CONFIG_IRDA_DYNAMIC_WINDOW */
                        /* Window has been adjusted for the max packet
                         * size, so much simpler... - Jean II */
-                       nextfit = (skb_queue_len(&self->txq) > 0);
+                       nextfit = !skb_queue_empty(&self->txq);
 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
                        /*
                         *  Send data with poll bit cleared only if window > 1
@@ -1814,7 +1814,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
 #else  /* CONFIG_IRDA_DYNAMIC_WINDOW */
                        /* Window has been adjusted for the max packet
                         * size, so much simpler... - Jean II */
-                       nextfit = (skb_queue_len(&self->txq) > 0);
+                       nextfit = !skb_queue_empty(&self->txq);
 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
                        /*
                         *  Send data with final bit cleared only if window > 1
@@ -1937,7 +1937,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
                                irlap_data_indication(self, skb, FALSE);
 
                                /* Any pending data requests?  */
-                               if ((skb_queue_len(&self->txq) > 0) &&
+                               if (!skb_queue_empty(&self->txq) &&
                                    (self->window > 0))
                                {
                                        self->ack_required = TRUE;
@@ -2038,7 +2038,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
                        /*
                         *  Any pending data requests?
                         */
-                       if ((skb_queue_len(&self->txq) > 0) &&
+                       if (!skb_queue_empty(&self->txq) &&
                            (self->window > 0) && !self->remote_busy)
                        {
                                irlap_data_indication(self, skb, TRUE);
@@ -2069,7 +2069,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
                 */
                nr_status = irlap_validate_nr_received(self, info->nr);
                if (nr_status == NR_EXPECTED) {
-                       if ((skb_queue_len( &self->txq) > 0) &&
+                       if (!skb_queue_empty(&self->txq) &&
                            (self->window > 0)) {
                                self->remote_busy = FALSE;
 
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 040abe714aa35f11161ffb37e88ecebe7c71bef8..6dafbb43b5296922f62cacf8c596d9987d835c0f 100644
@@ -1018,11 +1018,10 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
        /*
         *  We can now fill the window with additional data frames
         */
-       while (skb_queue_len( &self->txq) > 0) {
+       while (!skb_queue_empty(&self->txq)) {
 
                IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__);
-               if ((skb_queue_len( &self->txq) > 0) &&
-                   (self->window > 0)) {
+               if (self->window > 0) {
                        skb = skb_dequeue( &self->txq);
                        IRDA_ASSERT(skb != NULL, return;);
 
@@ -1031,8 +1030,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
                         *  bit cleared
                         */
                        if ((self->window > 1) &&
-                           skb_queue_len(&self->txq) > 0)
-                       {
+                           !skb_queue_empty(&self->txq)) {
                                irlap_send_data_primary(self, skb);
                        } else {
                                irlap_send_data_primary_poll(self, skb);
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index d091ccf773b3db03abc6bcda3c009b5d6c4f4975..6602d901f8b111ba51e2947f51b4ebaf452d5dbb 100644
@@ -1513,7 +1513,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
        /*
         *  Check if there is still data segments in the transmit queue
         */
-       if (skb_queue_len(&self->tx_queue) > 0) {
+       if (!skb_queue_empty(&self->tx_queue)) {
                if (priority == P_HIGH) {
                        /*
                         *  No need to send the queued data, if we are
diff --git a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c
index cd130c3b72bc438063597d2245a5db26e6ec38d6..d5bdb53a348f3002c184f3f0059ebbf183ba3309 100644
@@ -84,7 +84,7 @@ static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr)
        if (llc->dev->flags & IFF_LOOPBACK)
                goto out;
        rc = 1;
-       if (!skb_queue_len(&llc->pdu_unack_q))
+       if (skb_queue_empty(&llc->pdu_unack_q))
                goto out;
        skb = skb_peek(&llc->pdu_unack_q);
        pdu = llc_pdu_sn_hdr(skb);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fc456a7aaec39bad1c147a8eff9a9d01e48b722c..3405fdf41b93ab1950f739673614499b06b0ad3f 100644
@@ -858,7 +858,7 @@ static inline void netlink_rcv_wake(struct sock *sk)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
 
-       if (!skb_queue_len(&sk->sk_receive_queue))
+       if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(0, &nlk->state);
        if (!test_bit(0, &nlk->state))
                wake_up_interruptible(&nlk->wait);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 664d0e47374fd1003ec8f0296aa96afa8ed92c9d..7845d045eec4d1434037eaf0a964f33f70fcb0e9 100644
@@ -385,7 +385,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt)
        memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256);
 
        q->qcount = -1;
-       if (skb_queue_len(&sch->q) == 0)
+       if (skb_queue_empty(&sch->q))
                PSCHED_SET_PASTPERFECT(q->qidlestart);
        sch_tree_unlock(sch);
        return 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c420eba4876b50fd853b816c78f04e5c5f127674..d403e34088ad75c1c12ee5fbba7bf8d90e233cfc 100644
@@ -302,7 +302,7 @@ static void unix_write_space(struct sock *sk)
  * may receive messages only from that peer. */
 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 {
-       if (skb_queue_len(&sk->sk_receive_queue)) {
+       if (!skb_queue_empty(&sk->sk_receive_queue)) {
                skb_queue_purge(&sk->sk_receive_queue);
                wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 
@@ -1619,7 +1619,7 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
        for (;;) {
                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
-               if (skb_queue_len(&sk->sk_receive_queue) ||
+               if (!skb_queue_empty(&sk->sk_receive_queue) ||
                    sk->sk_err ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current) ||