pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
ath9k: Use GFP_ATOMIC when allocating TX private area
authorLuis R. Rodriguez <lrodriguez@atheros.com>
Wed, 3 Dec 2008 11:35:30 +0000 (03:35 -0800)
committerJohn W. Linville <linville@tuxdriver.com>
Fri, 5 Dec 2008 14:35:59 +0000 (09:35 -0500)
Using GFP_KERNEL was wrong and produces a 'scheduling while atomic'
bug as we're in a tasklet. Also, check for proper return values
now, in case allocation fails and be sure to stop the TX queue
in case of memory issues but guarantee the TX queue will
eventually be woken up.

Signed-off-by: Senthil Balasubramanian <senthilkumar@atheros.com>
Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
drivers/net/wireless/ath9k/xmit.c

index 17fd05e2f247c485259681017369120de70e7e29..9de27c681b86f9135e10fc5e3572e8e0f819f753 100644 (file)
@@ -1652,7 +1652,9 @@ static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
        int hdrlen;
        __le16 fc;
 
-       tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_KERNEL);
+       tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
+       if (unlikely(!tx_info_priv))
+               return -ENOMEM;
        tx_info->rate_driver_data[0] = tx_info_priv;
        hdrlen = ieee80211_get_hdrlen_from_skb(skb);
        fc = hdr->frame_control;
@@ -1801,10 +1803,26 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
 
        r = ath_tx_setup_buffer(sc, bf, skb, txctl);
        if (unlikely(r)) {
-               spin_lock_bh(&sc->sc_txbuflock);
+               struct ath_txq *txq = txctl->txq;
+
                DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");
+
+               /* upon ath_tx_processq() this TX queue will be resumed, we
+                * guarantee this will happen by knowing beforehand that
+                * we will at least have to run TX completion on one buffer
+                * on the queue */
+               spin_lock_bh(&txq->axq_lock);
+               if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
+                       ieee80211_stop_queue(sc->hw,
+                               skb_get_queue_mapping(skb));
+                       txq->stopped = 1;
+               }
+               spin_unlock_bh(&txq->axq_lock);
+
+               spin_lock_bh(&sc->sc_txbuflock);
                list_add_tail(&bf->list, &sc->sc_txbuf);
                spin_unlock_bh(&sc->sc_txbuflock);
+
                return r;
        }