qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_nak_state = 0;
+ qp->r_wrid_valid = 0;
qp->s_rnr_timeout = 0;
qp->s_head = 0;
qp->s_tail = 0;
/**
* ipath_error_qp - put a QP into an error state
* @qp: the QP to put into an error state
+ * @err: the receive completion error to signal if a RWQE is active
*
* Flushes both send and receive work queues.
* QP s_lock should be held and interrupts disabled.
*/
-void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
list_del_init(&qp->piowait);
spin_unlock(&dev->pending_lock);
- wc.status = IB_WC_WR_FLUSH_ERR;
wc.vendor_err = 0;
wc.byte_len = 0;
wc.imm_data = 0;
wc.sl = 0;
wc.dlid_path_bits = 0;
wc.port_num = 0;
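+ /*
+  * If a RWQE is active, retire it with the requested completion
+  * error before the queues are flushed below.
+  */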
+ if (qp->r_wrid_valid) {
+ qp->r_wrid_valid = 0;
+ wc.wr_id = qp->r_wr_id;
+ wc.opcode = IB_WC_RECV;
+ wc.status = err;
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
while (qp->s_last != qp->s_head) {
struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
break;
case IB_QPS_ERR:
- ipath_error_qp(qp);
+ ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
break;
default:
return 1;
}
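+/**
+ * ipath_rc_error - put the QP into the error state
+ * @qp: the QP to put into an error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Takes the QP s_lock with interrupts disabled, as ipath_error_qp()
+ * expects, before moving the QP to the error state.
+ */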
+static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
+{
+ spin_lock_irq(&qp->s_lock);
+ qp->state = IB_QPS_ERR;
+ ipath_error_qp(qp, err);
+ spin_unlock_irq(&qp->s_lock);
+}
+
/**
* ipath_rc_rcv - process an incoming RC packet
* @dev: the device this packet came in on
*/
if (qp->r_ack_state >= OP(COMPARE_SWAP))
goto send_ack;
- /* XXX Flush WQEs */
- qp->state = IB_QPS_ERR;
+ ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
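+ /* Queue an Invalid Request NAK for the offending PSN. */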
qp->r_ack_state = OP(SEND_ONLY);
qp->r_nak_state = IB_NAK_INVALID_REQUEST;
qp->r_ack_psn = qp->r_psn;
goto nack_inv;
ipath_copy_sge(&qp->r_sge, data, tlen);
qp->r_msn++;
- if (opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_ONLY))
+ if (!qp->r_wrid_valid)
break;
+ qp->r_wrid_valid = 0;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;
* is pending though.
*/
if (qp->r_ack_state < OP(COMPARE_SWAP)) {
- /* XXX Flush WQEs */
- qp->state = IB_QPS_ERR;
+ ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
qp->r_ack_state = OP(RDMA_WRITE_ONLY);
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn;
}
}
spin_unlock_irqrestore(&rq->lock, flags);
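+ /* A RWQE is now active; r_wrid_valid is cleared when its CQ entry is made. */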
+ qp->r_wrid_valid = 1;
bail:
return ret;
u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
u8 r_reuse_sge; /* for UC receive errors */
u8 r_sge_inx; /* current index into sg_list */
+ u8 r_wrid_valid; /* r_wr_id set but CQ entry not yet made */
u8 qp_access_flags;
u8 s_max_sge; /* size of s_wq->sg_list */
u8 s_retry_cnt; /* number of times to retry */
int ipath_destroy_qp(struct ib_qp *ibqp);
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
+
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);