RDMA/amso1100: Add spinlocks to serialize ib_post_send/ib_post_recv
author    Tom Tucker <tom@opengridcomputing.com>
          Tue, 3 Oct 2006 14:46:41 +0000 (09:46 -0500)
committer Roland Dreier <rolandd@cisco.com>
          Tue, 10 Oct 2006 16:51:13 +0000 (09:51 -0700)
The AMSO driver was not thread-safe in the post WR code, and it had
code that would sleep if the WR post FIFO was full. Since these
functions can be called at interrupt level, I changed the sleep to a
udelay.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
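
For reference, here is a minimal sketch (not part of the patch) of the locking
pattern this change establishes: take a per-QP spinlock with the irqsave
variant around the post and the doorbell write, and busy-wait with udelay()
instead of sleeping, because the post paths may run in atomic context. The
struct my_qp and my_post_one_wr() below are hypothetical stand-ins for the
c2_qp / qp_wr_post() / c2_activity() code in c2_qp.c, not the driver's actual
definitions.

    /*
     * Illustrative sketch only: the post path after this change, reduced
     * to its locking skeleton.  All my_* names are hypothetical.
     */
    #include <linux/spinlock.h>
    #include <linux/delay.h>
    #include <linux/io.h>

    struct my_qp {
            spinlock_t lock;        /* serializes posts to this QP's MQs */
            void __iomem *hint;     /* adapter doorbell/hint register */
    };

    static int my_post_one_wr(struct my_qp *qp)
    {
            unsigned long flags;
            int err;

            /*
             * irqsave variant because ib_post_send()/ib_post_recv() may be
             * called at interrupt level; a plain spin_lock() would not be
             * safe against the same lock being taken from IRQ context.
             */
            spin_lock_irqsave(&qp->lock, flags);

            err = 0;                /* err = qp_wr_post(...) in the driver */
            if (!err) {
                    /*
                     * Busy-wait with udelay() rather than schedule_timeout():
                     * we may be in atomic context, so sleeping is not allowed.
                     */
                    while (readl(qp->hint) & 0x80000000)
                            udelay(10);
                    /* the doorbell/hint write would follow here */
            }

            spin_unlock_irqrestore(&qp->lock, flags);
            return err;
    }
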
drivers/infiniband/hw/amso1100/c2_qp.c

index 12261132b0778edd8ea9a59a35016cf0103f1862..5bcf697aa335bace362fcb3ecc86de5ed58e0eb5 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -35,6 +35,8 @@
  *
  */
 
+#include <linux/delay.h>
+
 #include "c2.h"
 #include "c2_vq.h"
 #include "c2_status.h"
@@ -705,10 +707,8 @@ static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
         * cannot get on the bus and the card and system hang in a
         * deadlock -- thus the need for this code. [TOT]
         */
-       while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(0);
-       }
+       while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
+               udelay(10);
 
        __raw_writel(C2_HINT_MAKE(mq_index, shared),
                     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
@@ -766,6 +766,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
        struct c2_dev *c2dev = to_c2dev(ibqp->device);
        struct c2_qp *qp = to_c2qp(ibqp);
        union c2wr wr;
+       unsigned long lock_flags;
        int err = 0;
 
        u32 flags;
@@ -881,8 +882,10 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                /*
                 * Post the puppy!
                 */
+               spin_lock_irqsave(&qp->lock, lock_flags);
                err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
                if (err) {
+                       spin_unlock_irqrestore(&qp->lock, lock_flags);
                        break;
                }
 
@@ -890,6 +893,7 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                 * Enqueue mq index to activity FIFO.
                 */
                c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
+               spin_unlock_irqrestore(&qp->lock, lock_flags);
 
                ib_wr = ib_wr->next;
        }
@@ -905,6 +909,7 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
        struct c2_dev *c2dev = to_c2dev(ibqp->device);
        struct c2_qp *qp = to_c2qp(ibqp);
        union c2wr wr;
+       unsigned long lock_flags;
        int err = 0;
 
        if (qp->state > IB_QPS_RTS)
@@ -945,8 +950,10 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                        break;
                }
 
+               spin_lock_irqsave(&qp->lock, lock_flags);
                err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
                if (err) {
+                       spin_unlock_irqrestore(&qp->lock, lock_flags);
                        break;
                }
 
@@ -954,6 +961,7 @@ int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
                 * Enqueue mq index to activity FIFO
                 */
                c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
+               spin_unlock_irqrestore(&qp->lock, lock_flags);
 
                ib_wr = ib_wr->next;
        }