IPoIB/cm: spin_lock_irqsave() -> spin_lock_irq() replacements
author	Roland Dreier <rolandd@cisco.com>
	Wed, 25 Apr 2007 04:30:37 +0000 (21:30 -0700)
committer	Roland Dreier <rolandd@cisco.com>
	Wed, 25 Apr 2007 04:30:37 +0000 (21:30 -0700)
There are quite a few places in ipoib_cm.c where we know IRQs are
enabled because we do something that sleeps in the same function, so
we can convert several occurrences of spin_lock_irqsave() to a plain
spin_lock_irq().  This cleans up the source a little and makes the
code smaller too:

add/remove: 0/0 grow/shrink: 1/5 up/down: 3/-51 (-48)
function                                     old     new   delta
ipoib_cm_tx_reap                             403     406      +3
ipoib_cm_stale_task                          146     145      -1
ipoib_cm_dev_stop                            173     172      -1
ipoib_cm_tx_handler                          964     956      -8
ipoib_cm_rx_handler                          956     937     -19
ipoib_cm_skb_reap                            212     190     -22

Signed-off-by: Roland Dreier <rolandd@cisco.com>
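
To illustrate the rationale, here is a minimal sketch (not part of the patch;
struct and function names such as example_priv and example_add are hypothetical).
A function that already does something that can sleep can only ever be entered
with IRQs enabled, so saving and restoring the IRQ flags around the lock is
redundant and a plain spin_lock_irq()/spin_unlock_irq() pair does the same job
with slightly less code:

	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct example_priv {			/* hypothetical stand-in for ipoib_dev_priv */
		spinlock_t lock;
		struct list_head ids;
	};

	struct example_entry {
		struct list_head list;
	};

	static int example_add(struct example_priv *priv)
	{
		/* GFP_KERNEL may sleep, so this function must be called with
		 * IRQs enabled... */
		struct example_entry *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -ENOMEM;

		/* ...which means there is nothing useful to save in 'flags':
		 * just disable IRQs on lock and re-enable them on unlock. */
		spin_lock_irq(&priv->lock);
		list_add(&p->list, &priv->ids);
		spin_unlock_irq(&priv->lock);

		return 0;
	}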
drivers/infiniband/ulp/ipoib/ipoib_cm.c

index 7a4af7a3e04f5a6d29d024f81ccfdb96feda75c9..da7e10230cf8bd48fdd2501d6c2ae85038bbc345 100644
@@ -228,7 +228,6 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
        struct net_device *dev = cm_id->context;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
-       unsigned long flags;
        unsigned psn;
        int ret;
 
@@ -257,9 +256,9 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
        cm_id->context = p;
        p->jiffies = jiffies;
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irq(&priv->lock);
        list_add(&p->list, &priv->cm.passive_ids);
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irq(&priv->lock);
        queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        return 0;
@@ -277,7 +276,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 {
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;
-       unsigned long flags;
        int ret;
 
        switch (event->event) {
@@ -290,14 +288,14 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
        case IB_CM_REJ_RECEIVED:
                p = cm_id->context;
                priv = netdev_priv(p->dev);
-               spin_lock_irqsave(&priv->lock, flags);
+               spin_lock_irq(&priv->lock);
                if (list_empty(&p->list))
                        ret = 0; /* Connection is going away already. */
                else {
                        list_del_init(&p->list);
                        ret = -ECONNRESET;
                }
-               spin_unlock_irqrestore(&priv->lock, flags);
+               spin_unlock_irq(&priv->lock);
                if (ret) {
                        ib_destroy_qp(p->qp);
                        kfree(p);
@@ -612,23 +610,22 @@ void ipoib_cm_dev_stop(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_rx *p;
-       unsigned long flags;
 
        if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
                return;
 
        ib_destroy_cm_id(priv->cm.id);
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_del_init(&p->list);
-               spin_unlock_irqrestore(&priv->lock, flags);
+               spin_unlock_irq(&priv->lock);
                ib_destroy_cm_id(p->id);
                ib_destroy_qp(p->qp);
                kfree(p);
-               spin_lock_irqsave(&priv->lock, flags);
+               spin_lock_irq(&priv->lock);
        }
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irq(&priv->lock);
 
        cancel_delayed_work(&priv->cm.stale_task);
 }
@@ -642,7 +639,6 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        struct sk_buff *skb;
-       unsigned long flags;
 
        p->mtu = be32_to_cpu(data->mtu);
 
@@ -680,12 +676,12 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 
        skb_queue_head_init(&skqueue);
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irq(&priv->lock);
 
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
@@ -895,7 +891,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
        struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
        struct net_device *dev = priv->dev;
        struct ipoib_neigh *neigh;
-       unsigned long flags;
        int ret;
 
        switch (event->event) {
@@ -914,7 +909,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
-               spin_lock_irqsave(&priv->tx_lock, flags);
+               spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
                neigh = tx->neigh;
 
@@ -934,7 +929,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                }
 
                spin_unlock(&priv->lock);
-               spin_unlock_irqrestore(&priv->tx_lock, flags);
+               spin_unlock_irq(&priv->tx_lock);
                break;
        default:
                break;
@@ -1023,21 +1018,20 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct ipoib_cm_tx *p;
-       unsigned long flags;
 
-       spin_lock_irqsave(&priv->tx_lock, flags);
+       spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);
        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                list_del(&p->list);
                spin_unlock(&priv->lock);
-               spin_unlock_irqrestore(&priv->tx_lock, flags);
+               spin_unlock_irq(&priv->tx_lock);
                ipoib_cm_tx_destroy(p);
-               spin_lock_irqsave(&priv->tx_lock, flags);
+               spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
-       spin_unlock_irqrestore(&priv->tx_lock, flags);
+       spin_unlock_irq(&priv->tx_lock);
 }
 
 static void ipoib_cm_skb_reap(struct work_struct *work)
@@ -1046,15 +1040,14 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
                                                   cm.skb_task);
        struct net_device *dev = priv->dev;
        struct sk_buff *skb;
-       unsigned long flags;
 
        unsigned mtu = priv->mcast_mtu;
 
-       spin_lock_irqsave(&priv->tx_lock, flags);
+       spin_lock_irq(&priv->tx_lock);
        spin_lock(&priv->lock);
        while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
                spin_unlock(&priv->lock);
-               spin_unlock_irqrestore(&priv->tx_lock, flags);
+               spin_unlock_irq(&priv->tx_lock);
                if (skb->protocol == htons(ETH_P_IP))
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1062,11 +1055,11 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
 #endif
                dev_kfree_skb_any(skb);
-               spin_lock_irqsave(&priv->tx_lock, flags);
+               spin_lock_irq(&priv->tx_lock);
                spin_lock(&priv->lock);
        }
        spin_unlock(&priv->lock);
-       spin_unlock_irqrestore(&priv->tx_lock, flags);
+       spin_unlock_irq(&priv->tx_lock);
 }
 
 void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
@@ -1088,9 +1081,8 @@ static void ipoib_cm_stale_task(struct work_struct *work)
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
-       unsigned long flags;
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                /* List if sorted by LRU, start from tail,
                 * stop when we see a recently used entry */
@@ -1098,13 +1090,13 @@ static void ipoib_cm_stale_task(struct work_struct *work)
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_del_init(&p->list);
-               spin_unlock_irqrestore(&priv->lock, flags);
+               spin_unlock_irq(&priv->lock);
                ib_destroy_cm_id(p->id);
                ib_destroy_qp(p->qp);
                kfree(p);
-               spin_lock_irqsave(&priv->lock, flags);
+               spin_lock_irq(&priv->lock);
        }
-       spin_unlock_irqrestore(&priv->lock, flags);
+       spin_unlock_irq(&priv->lock);
 }