pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
IB/ipath: Implement IB_EVENT_QP_LAST_WQE_REACHED
author: Ralph Campbell <ralph.campbell@qlogic.com>
Sat, 25 Aug 2007 23:45:03 +0000 (16:45 -0700)
committer: Roland Dreier <rolandd@cisco.com>
Wed, 10 Oct 2007 03:51:20 +0000 (20:51 -0700)
This patch implements the IB_EVENT_QP_LAST_WQE_REACHED event which is
needed by ib_ipoib to destroy the QP when used in connected mode.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/ipath/ipath_qp.c
drivers/infiniband/hw/ipath/ipath_rc.c
drivers/infiniband/hw/ipath/ipath_verbs.h

index a8c4a6b03d7af6ad540b61bcc992c5978933a8f0..6a41fdbc8e57dfca779759de14137fe33b811015 100644 (file)
@@ -377,13 +377,15 @@ static void ipath_reset_qp(struct ipath_qp *qp)
  * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
+ * Returns true if last WQE event should be generated.
  * The QP s_lock should be held and interrupts disabled.
  */
 
-void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
+int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 {
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
+       int ret = 0;
 
        ipath_dbg("QP%d/%d in error state\n",
                  qp->ibqp.qp_num, qp->remote_qpn);
@@ -454,7 +456,10 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
                wq->tail = tail;
 
                spin_unlock(&qp->r_rq.lock);
-       }
+       } else if (qp->ibqp.event_handler)
+               ret = 1;
+
+       return ret;
 }
 
 /**
@@ -473,6 +478,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        struct ipath_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        unsigned long flags;
+       int lastwqe = 0;
        int ret;
 
        spin_lock_irqsave(&qp->s_lock, flags);
@@ -532,7 +538,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                break;
 
        case IB_QPS_ERR:
-               ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+               lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;
 
        default:
@@ -591,6 +597,14 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        qp->state = new_state;
        spin_unlock_irqrestore(&qp->s_lock, flags);
 
+       if (lastwqe) {
+               struct ib_event ev;
+
+               ev.device = qp->ibqp.device;
+               ev.element.qp = &qp->ibqp;
+               ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+               qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+       }
        ret = 0;
        goto bail;
 
index 53259daeb4f82d956c30271a57652d3595ad2b8e..5c29b2bfea17b7c6236181f214b4224fbf4cb60d 100644 (file)
@@ -1497,11 +1497,21 @@ send_ack:
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
        unsigned long flags;
+       int lastwqe;
 
        spin_lock_irqsave(&qp->s_lock, flags);
        qp->state = IB_QPS_ERR;
-       ipath_error_qp(qp, err);
+       lastwqe = ipath_error_qp(qp, err);
        spin_unlock_irqrestore(&qp->s_lock, flags);
+
+       if (lastwqe) {
+               struct ib_event ev;
+
+               ev.device = qp->ibqp.device;
+               ev.element.qp = &qp->ibqp;
+               ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+               qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+       }
 }
 
 static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
index 619ad728b07bb0ffe93dada4acb1228aec1db57a..a1972295bffdd009e3b0a9f6d9838063971b3924 100644 (file)
@@ -672,7 +672,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 
 int ipath_destroy_qp(struct ib_qp *ibqp);
 
-void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
+int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
 
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata);