RDMA/cxgb3: MEM_MGT_EXTENSIONS support
author     Steve Wise <swise@opengridcomputing.com>     Tue, 15 Jul 2008 06:48:45 +0000 (23:48 -0700)
committer  Roland Dreier <rolandd@cisco.com>            Tue, 15 Jul 2008 06:48:45 +0000 (23:48 -0700)
- set IB_DEVICE_MEM_MGT_EXTENSIONS capability bit if fw supports it.
- set max_fast_reg_page_list_len device attribute.
- add iwch_alloc_fast_reg_mr function.
- add iwch_alloc_fastreg_pbl function.
- add iwch_free_fastreg_pbl function.
- adjust the WQ depth for kernel mode work queues to account for
  fastreg possibly taking 2 WR slots.
- add fastreg_mr work request support.
- add local_inv work request support.
- add send_with_inv and send_with_se_inv work request support.
- remove duplicate enums/defines for TPT/MW/MR.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
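
For context, the end-to-end flow these verbs enable looks roughly like the sketch below. This is a minimal consumer-side example, assuming a kernel ULP that already has a PD, a QP, and an array of DMA page addresses; the variable names and the error-free single-WR flow are illustrative, not part of this patch.

    /* Hedged sketch of a kernel ULP using MEM_MGT_EXTENSIONS. */
    struct ib_mr *mr;
    struct ib_fast_reg_page_list *pl;
    struct ib_send_wr fr_wr, *bad_wr;
    int i;

    mr = ib_alloc_fast_reg_mr(pd, T3_MAX_FASTREG_DEPTH);
    pl = ib_alloc_fast_reg_page_list(qp->device, T3_MAX_FASTREG_DEPTH);
    for (i = 0; i < npages; i++)
            pl->page_list[i] = dma_pages[i];    /* DMA page addresses */

    memset(&fr_wr, 0, sizeof fr_wr);
    fr_wr.opcode = IB_WR_FAST_REG_MR;
    fr_wr.send_flags = IB_SEND_SIGNALED;
    fr_wr.wr.fast_reg.iova_start = dma_pages[0];
    fr_wr.wr.fast_reg.page_list = pl;
    fr_wr.wr.fast_reg.page_list_len = npages;
    fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
    fr_wr.wr.fast_reg.length = npages * PAGE_SIZE;
    fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
                                     IB_ACCESS_REMOTE_READ;
    fr_wr.wr.fast_reg.rkey = mr->rkey;
    ib_post_send(qp, &fr_wr, &bad_wr);

The peer can then invalidate mr->rkey remotely with IB_WR_SEND_WITH_INV, or the local side can post IB_WR_LOCAL_INV; both paths are wired up by this patch below.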
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/cxgb3/cxio_wr.h
drivers/infiniband/hw/cxgb3/iwch_cq.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb3/iwch_provider.h
drivers/infiniband/hw/cxgb3/iwch_qp.c

diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 3f441fc57c1719c6991d028dd5a0979760aa2d30..340e4181c76108a86d8277d0c0e5c33a951e2e09 100644
@@ -145,7 +145,9 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
        }
        wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof(*wqe));
-       build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 0, qpid, 7);
+       build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
+                      T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
+                      T3_SOPEOP);
        wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
        sge_cmd = qpid << 8 | 3;
        wqe->sge_cmd = cpu_to_be64(sge_cmd);
@@ -558,7 +560,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
        wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof(*wqe));
        build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
-                      T3_CTL_QP_TID, 7);
+                      T3_CTL_QP_TID, 7, T3_SOPEOP);
        wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
        sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
        wqe->sge_cmd = cpu_to_be64(sge_cmd);
@@ -674,7 +676,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
                build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
                               Q_GENBIT(rdev_p->ctrl_qp.wptr,
                                        T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
-                              wr_len);
+                              wr_len, T3_SOPEOP);
                if (flag == T3_COMPLETION_FLAG)
                        ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
                len -= 96;
@@ -816,6 +818,13 @@ int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
                             0, 0);
 }
 
+int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
+{
+       *stag = T3_STAG_UNSET;
+       return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
+                            0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
+}
+
 int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
 {
        struct t3_rdma_init_wr *wqe;
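
The new cxio_allocate_stag() above reserves a non-shared-MR TPT entry without programming any translation: the STag starts as T3_STAG_UNSET so a fresh index is chosen, and length, VA, and permissions stay zero until a fastreg WR fills them in. A hedged sketch of a call site, mirroring iwch_alloc_fast_reg_mr() further down ("rdev", "pdid", "pbl_size", and "pbl_addr" are illustrative names):

    u32 stag = 0;
    int ret;

    ret = cxio_allocate_stag(rdev, &stag, pdid, pbl_size, pbl_addr);
    if (!ret)
            PDBG("reserved stag 0x%x (index 0x%x)\n", stag, stag >> 8);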
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 6e128f6bab05140282e1d86930aaab98ddb4cc0c..25a880664e6b83e61736a1d2cef1a6bf76854cf9 100644
@@ -165,6 +165,7 @@ int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
 int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
                   u32 pbl_addr);
 int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
+int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
 int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
 int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
 void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index f1a25a821a45049f6c5688b92ca4d5c14b6e9fcf..de760e9f1cc6d3116499180f94ab08ec8bf6df4c 100644
@@ -72,7 +72,8 @@ enum t3_wr_opcode {
        T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
        T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
        T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
-       T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
+       T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
+       T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
 } __attribute__ ((packed));
 
 enum t3_rdma_opcode {
@@ -89,7 +90,8 @@ enum t3_rdma_opcode {
        T3_FAST_REGISTER,
        T3_LOCAL_INV,
        T3_QP_MOD,
-       T3_BYPASS
+       T3_BYPASS,
+       T3_RDMA_READ_REQ_WITH_INV,
 } __attribute__ ((packed));
 
 static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
@@ -103,6 +105,7 @@ static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
                case T3_WR_BIND: return T3_BIND_MW;
                case T3_WR_INIT: return T3_RDMA_INIT;
                case T3_WR_QP_MOD: return T3_QP_MOD;
+               case T3_WR_FASTREG: return T3_FAST_REGISTER;
                default: break;
        }
        return -1;
@@ -170,11 +173,54 @@ struct t3_send_wr {
        struct t3_sge sgl[T3_MAX_SGE];  /* 4+ */
 };
 
+#define T3_MAX_FASTREG_DEPTH 24
+#define T3_MAX_FASTREG_FRAG 10
+
+struct t3_fastreg_wr {
+       struct fw_riwrh wrh;    /* 0 */
+       union t3_wrid wrid;     /* 1 */
+       __be32 stag;            /* 2 */
+       __be32 len;
+       __be32 va_base_hi;      /* 3 */
+       __be32 va_base_lo_fbo;
+       __be32 page_type_perms; /* 4 */
+       __be32 reserved1;
+       __be64 pbl_addrs[0];    /* 5+ */
+};
+
+/*
+ * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
+ */
+struct t3_pbl_frag {
+       struct fw_riwrh wrh;    /* 0 */
+       __be64 pbl_addrs[14];   /* 1..14 */
+};
+
+#define S_FR_PAGE_COUNT                24
+#define M_FR_PAGE_COUNT                0xff
+#define V_FR_PAGE_COUNT(x)     ((x) << S_FR_PAGE_COUNT)
+#define G_FR_PAGE_COUNT(x)     ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
+
+#define S_FR_PAGE_SIZE         16
+#define M_FR_PAGE_SIZE         0x1f
+#define V_FR_PAGE_SIZE(x)      ((x) << S_FR_PAGE_SIZE)
+#define G_FR_PAGE_SIZE(x)      ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
+
+#define S_FR_TYPE              8
+#define M_FR_TYPE              0x1
+#define V_FR_TYPE(x)           ((x) << S_FR_TYPE)
+#define G_FR_TYPE(x)           ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
+
+#define S_FR_PERMS             0
+#define M_FR_PERMS             0xff
+#define V_FR_PERMS(x)          ((x) << S_FR_PERMS)
+#define G_FR_PERMS(x)          ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
+
 struct t3_local_inv_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        __be32 stag;            /* 2 */
-       __be32 reserved3;
+       __be32 reserved;
 };
 
 struct t3_rdma_write_wr {
@@ -193,7 +239,8 @@ struct t3_rdma_read_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        u8 rdmaop;              /* 2 */
-       u8 reserved[3];
+       u8 local_inv;
+       u8 reserved[2];
        __be32 rem_stag;
        __be64 rem_to;          /* 3 */
        __be32 local_stag;      /* 4 */
@@ -201,18 +248,6 @@ struct t3_rdma_read_wr {
        __be64 local_to;        /* 5 */
 };
 
-enum t3_addr_type {
-       T3_VA_BASED_TO = 0x0,
-       T3_ZERO_BASED_TO = 0x1
-} __attribute__ ((packed));
-
-enum t3_mem_perms {
-       T3_MEM_ACCESS_LOCAL_READ = 0x1,
-       T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
-       T3_MEM_ACCESS_REM_READ = 0x4,
-       T3_MEM_ACCESS_REM_WRITE = 0x8
-} __attribute__ ((packed));
-
 struct t3_bind_mw_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
@@ -336,6 +371,11 @@ struct t3_genbit {
        __be64 genbit;
 };
 
+struct t3_wq_in_err {
+       u64 flit[13];
+       u64 err;
+};
+
 enum rdma_init_wr_flags {
        MPA_INITIATOR = (1<<0),
        PRIV_QP = (1<<1),
@@ -346,13 +386,16 @@ union t3_wr {
        struct t3_rdma_write_wr write;
        struct t3_rdma_read_wr read;
        struct t3_receive_wr recv;
+       struct t3_fastreg_wr fastreg;
+       struct t3_pbl_frag pbl_frag;
        struct t3_local_inv_wr local_inv;
        struct t3_bind_mw_wr bind;
        struct t3_bypass_wr bypass;
        struct t3_rdma_init_wr init;
        struct t3_modify_qp_wr qp_mod;
        struct t3_genbit genbit;
-       u64 flit[16];
+       struct t3_wq_in_err wq_in_err;
+       __be64 flit[16];
 };
 
 #define T3_SQ_CQE_FLIT   13
@@ -366,12 +409,18 @@ static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
        return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
 }
 
+enum t3_wr_hdr_bits {
+       T3_EOP = 1,
+       T3_SOP = 2,
+       T3_SOPEOP = T3_EOP|T3_SOP,
+};
+
 static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
                                  enum t3_wr_flags flags, u8 genbit, u32 tid,
-                                 u8 len)
+                                 u8 len, u8 sopeop)
 {
        wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
-                                        V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
+                                        V_FW_RIWR_SOPEOP(sopeop) |
                                         V_FW_RIWR_FLAGS(flags));
        wmb();
        wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
@@ -404,6 +453,7 @@ enum tpt_addr_type {
 };
 
 enum tpt_mem_perm {
+       TPT_MW_BIND = 0x10,
        TPT_LOCAL_READ = 0x8,
        TPT_LOCAL_WRITE = 0x4,
        TPT_REMOTE_READ = 0x2,
@@ -659,7 +709,7 @@ struct t3_cq {
 
 static inline void cxio_set_wq_in_error(struct t3_wq *wq)
 {
-       wq->queue->flit[13] = 1;
+       wq->queue->wq_in_err.err = 1;
 }
 
 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
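
For readers decoding the new FR_* fields, here is an illustrative packing of page_type_perms for a VA-based, 24-page registration with 4 KB pages and local/remote read access; the concrete values are assumptions for the example, and the real packing is done by iwch_build_fastreg() in iwch_qp.c below.

    u32 ptp = V_FR_PAGE_COUNT(24)     |  /* bits 31:24: # of PBL entries */
              V_FR_PAGE_SIZE(12 - 12) |  /* bits 20:16: page_shift - 12, 0 == 4 KB */
              V_FR_TYPE(TPT_VATO)     |  /* bit 8: VA-based (vs zero-based) TO */
              V_FR_PERMS(TPT_LOCAL_READ | TPT_REMOTE_READ);  /* bits 7:0 */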
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
index 4ee8ccd0a9e52f65f62b20627226fccfac80e0f4..cf5474ae68ff010ae23004c45d4b421a1ff1b49c 100644
@@ -81,6 +81,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(cqe);
+       wc->wc_flags = 0;
 
        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__,
@@ -94,6 +95,11 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
+               if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
+                   CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
+                       wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
+                       wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+               }
        } else {
                switch (CQE_OPCODE(cqe)) {
                case T3_RDMA_WRITE:
@@ -105,17 +111,20 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
                        break;
                case T3_SEND:
                case T3_SEND_WITH_SE:
+               case T3_SEND_WITH_INV:
+               case T3_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        break;
                case T3_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
 
-               /* these aren't supported yet */
-               case T3_SEND_WITH_INV:
-               case T3_SEND_WITH_SE_INV:
                case T3_LOCAL_INV:
+                       wc->opcode = IB_WC_LOCAL_INV;
+                       break;
                case T3_FAST_REGISTER:
+                       wc->opcode = IB_WC_FAST_REG_MR;
+                       break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 95f82cfb6c54de3b95a3a98a0e303e9c8f2321a3..5d504f3ed68ba91c7578f0dfb898277e06c23f08 100644
@@ -768,6 +768,68 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
        return 0;
 }
 
+static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+{
+       struct iwch_dev *rhp;
+       struct iwch_pd *php;
+       struct iwch_mr *mhp;
+       u32 mmid;
+       u32 stag = 0;
+       int ret;
+
+       php = to_iwch_pd(pd);
+       rhp = php->rhp;
+       mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+       if (!mhp)
+               return ERR_PTR(-ENOMEM);
+
+       mhp->rhp = rhp;
+       ret = iwch_alloc_pbl(mhp, pbl_depth);
+       if (ret) {
+               kfree(mhp);
+               return ERR_PTR(ret);
+       }
+       mhp->attr.pbl_size = pbl_depth;
+       ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
+                                mhp->attr.pbl_size, mhp->attr.pbl_addr);
+       if (ret) {
+               iwch_free_pbl(mhp);
+               kfree(mhp);
+               return ERR_PTR(ret);
+       }
+       mhp->attr.pdid = php->pdid;
+       mhp->attr.type = TPT_NON_SHARED_MR;
+       mhp->attr.stag = stag;
+       mhp->attr.state = 1;
+       mmid = (stag) >> 8;
+       mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+       insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+       PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+       return &(mhp->ibmr);
+}
+
+static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
+                                       struct ib_device *device,
+                                       int page_list_len)
+{
+       struct ib_fast_reg_page_list *page_list;
+
+       page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
+                           GFP_KERNEL);
+       if (!page_list)
+               return ERR_PTR(-ENOMEM);
+
+       page_list->page_list = (u64 *)(page_list + 1);
+       page_list->max_page_list_len = page_list_len;
+
+       return page_list;
+}
+
+static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
+{
+       kfree(page_list);
+}
+
 static int iwch_destroy_qp(struct ib_qp *ib_qp)
 {
        struct iwch_dev *rhp;
@@ -843,6 +905,15 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
         */
        sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
        wqsize = roundup_pow_of_two(rqsize + sqsize);
+
+       /*
+        * Kernel users need more wq space for fastreg WRs, which can
+        * take 2 WQE slots.
+        */
+       ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
+       if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
+               wqsize = roundup_pow_of_two(rqsize +
+                               roundup_pow_of_two(attrs->cap.max_send_wr * 2));
        PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
             wqsize, sqsize, rqsize);
        qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
@@ -851,7 +922,6 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
        qhp->wq.size_log2 = ilog2(wqsize);
        qhp->wq.rq_size_log2 = ilog2(rqsize);
        qhp->wq.sq_size_log2 = ilog2(sqsize);
-       ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
        if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
                           ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
                kfree(qhp);
@@ -1048,6 +1118,7 @@ static int iwch_query_device(struct ib_device *ibdev,
        props->max_mr = dev->attr.max_mem_regs;
        props->max_pd = dev->attr.max_pds;
        props->local_ca_ack_delay = 0;
+       props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
 
        return 0;
 }
@@ -1088,6 +1159,28 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
        return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
 }
 
+static int fw_supports_fastreg(struct iwch_dev *iwch_dev)
+{
+       struct ethtool_drvinfo info;
+       struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
+       char *cp, *next;
+       unsigned fw_maj, fw_min;
+
+       rtnl_lock();
+       lldev->ethtool_ops->get_drvinfo(lldev, &info);
+       rtnl_unlock();
+
+       next = info.fw_version+1;
+       cp = strsep(&next, ".");
+       sscanf(cp, "%i", &fw_maj);
+       cp = strsep(&next, ".");
+       sscanf(cp, "%i", &fw_min);
+
+       PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min);
+
+       return fw_maj > 6 || (fw_maj == 6 && fw_min > 0);
+}
+
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
@@ -1149,8 +1242,10 @@ int iwch_register_device(struct iwch_dev *dev)
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
        dev->ibdev.owner = THIS_MODULE;
-       dev->device_cap_flags =
-           (IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW);
+       dev->device_cap_flags = IB_DEVICE_ZERO_STAG |
+                               IB_DEVICE_MEM_WINDOW;
+       if (fw_supports_fastreg(dev))
+               dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -1202,6 +1297,9 @@ int iwch_register_device(struct iwch_dev *dev)
        dev->ibdev.alloc_mw = iwch_alloc_mw;
        dev->ibdev.bind_mw = iwch_bind_mw;
        dev->ibdev.dealloc_mw = iwch_dealloc_mw;
+       dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
+       dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
+       dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
 
        dev->ibdev.attach_mcast = iwch_multicast_attach;
        dev->ibdev.detach_mcast = iwch_multicast_detach;
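
A worked example of the new WQ-depth adjustment in iwch_create_qp() shown above, using illustrative values max_send_wr = 64 and rqsize = 16:

    sqsize = roundup_pow_of_two(64)       = 64
    wqsize = roundup_pow_of_two(16 + 64)  = 128

    /* 128 < rqsize + 2 * sqsize = 144, so for kernel QPs: */
    wqsize = roundup_pow_of_two(16 + roundup_pow_of_two(64 * 2))
           = roundup_pow_of_two(144)      = 256

This leaves every SQ slot room to expand into the two WQE slots a fastreg WR may need.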
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 836163fc54291cdd2cbaab8980cf8bba448c68a9..f5ceca05c43512303d4ab0c236f2851c4ab0fa9a 100644
@@ -296,14 +296,6 @@ static inline u32 iwch_ib_to_tpt_access(int acc)
               TPT_LOCAL_READ;
 }
 
-static inline u32 iwch_ib_to_mwbind_access(int acc)
-{
-       return (acc & IB_ACCESS_REMOTE_WRITE ? T3_MEM_ACCESS_REM_WRITE : 0) |
-              (acc & IB_ACCESS_REMOTE_READ ? T3_MEM_ACCESS_REM_READ : 0) |
-              (acc & IB_ACCESS_LOCAL_WRITE ? T3_MEM_ACCESS_LOCAL_WRITE : 0) |
-              T3_MEM_ACCESS_LOCAL_READ;
-}
-
 enum iwch_mmid_state {
        IWCH_STAG_STATE_VALID,
        IWCH_STAG_STATE_INVALID
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 992613799228e6ab4a8fcee6d781925c5bc1eb85..3b44300a3036cf46cc28567f428f27f5d9d050c2 100644
@@ -44,54 +44,39 @@ static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
 
        switch (wr->opcode) {
        case IB_WR_SEND:
-       case IB_WR_SEND_WITH_IMM:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.rdmaop = T3_SEND_WITH_SE;
                else
                        wqe->send.rdmaop = T3_SEND;
                wqe->send.rem_stag = 0;
                break;
-#if 0                          /* Not currently supported */
-       case TYPE_SEND_INVALIDATE:
-       case TYPE_SEND_INVALIDATE_IMMEDIATE:
-               wqe->send.rdmaop = T3_SEND_WITH_INV;
-               wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
-               break;
-       case TYPE_SEND_SE_INVALIDATE:
-               wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
-               wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
+       case IB_WR_SEND_WITH_INV:
+               if (wr->send_flags & IB_SEND_SOLICITED)
+                       wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
+               else
+                       wqe->send.rdmaop = T3_SEND_WITH_INV;
+               wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
                break;
-#endif
        default:
-               break;
+               return -EINVAL;
        }
        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->send.reserved[0] = 0;
        wqe->send.reserved[1] = 0;
        wqe->send.reserved[2] = 0;
-       if (wr->opcode == IB_WR_SEND_WITH_IMM) {
-               plen = 4;
-               wqe->send.sgl[0].stag = wr->ex.imm_data;
-               wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
-               wqe->send.num_sgle = __constant_cpu_to_be32(0);
-               *flit_cnt = 5;
-       } else {
-               plen = 0;
-               for (i = 0; i < wr->num_sge; i++) {
-                       if ((plen + wr->sg_list[i].length) < plen) {
-                               return -EMSGSIZE;
-                       }
-                       plen += wr->sg_list[i].length;
-                       wqe->send.sgl[i].stag =
-                           cpu_to_be32(wr->sg_list[i].lkey);
-                       wqe->send.sgl[i].len =
-                           cpu_to_be32(wr->sg_list[i].length);
-                       wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
-               }
-               wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
-               *flit_cnt = 4 + ((wr->num_sge) << 1);
+       plen = 0;
+       for (i = 0; i < wr->num_sge; i++) {
+               if ((plen + wr->sg_list[i].length) < plen)
+                       return -EMSGSIZE;
+
+               plen += wr->sg_list[i].length;
+               wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
+               wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
+               wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
        }
+       wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
+       *flit_cnt = 4 + ((wr->num_sge) << 1);
        wqe->send.plen = cpu_to_be32(plen);
        return 0;
 }
@@ -143,9 +128,12 @@ static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
        if (wr->num_sge > 1)
                return -EINVAL;
        wqe->read.rdmaop = T3_READ_REQ;
+       if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+               wqe->read.local_inv = 1;
+       else
+               wqe->read.local_inv = 0;
        wqe->read.reserved[0] = 0;
        wqe->read.reserved[1] = 0;
-       wqe->read.reserved[2] = 0;
        wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
        wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
@@ -155,6 +143,57 @@ static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
        return 0;
 }
 
+static int iwch_build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
+                               u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
+{
+       int i;
+       __be64 *p;
+
+       if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
+               return -EINVAL;
+       *wr_cnt = 1;
+       wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
+       wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
+       wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
+       wqe->fastreg.va_base_lo_fbo =
+                               cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
+       wqe->fastreg.page_type_perms = cpu_to_be32(
+               V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
+               V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
+               V_FR_TYPE(TPT_VATO) |
+               V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
+       p = &wqe->fastreg.pbl_addrs[0];
+       for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
+
+               /* If we need a 2nd WR, then set it up */
+               if (i == T3_MAX_FASTREG_FRAG) {
+                       *wr_cnt = 2;
+                       wqe = (union t3_wr *)(wq->queue +
+                               Q_PTR2IDX((wq->wptr+1), wq->size_log2));
+                       build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
+                              Q_GENBIT(wq->wptr + 1, wq->size_log2),
+                              0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
+                              T3_EOP);
+
+                       p = &wqe->pbl_frag.pbl_addrs[0];
+               }
+               *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+       }
+       *flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
+       if (*flit_cnt > 15)
+               *flit_cnt = 15;
+       return 0;
+}
+
+static int iwch_build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
+                               u8 *flit_cnt)
+{
+       wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
+       wqe->local_inv.reserved = 0;
+       *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
+       return 0;
+}
+
 /*
  * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
  */
@@ -238,6 +277,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        u32 num_wrs;
        unsigned long flag;
        struct t3_swsq *sqp;
+       int wr_cnt = 1;
 
        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
@@ -262,15 +302,15 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                t3_wr_flags = 0;
                if (wr->send_flags & IB_SEND_SOLICITED)
                        t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
-               if (wr->send_flags & IB_SEND_FENCE)
-                       t3_wr_flags |= T3_READ_FENCE_FLAG;
                if (wr->send_flags & IB_SEND_SIGNALED)
                        t3_wr_flags |= T3_COMPLETION_FLAG;
                sqp = qhp->wq.sq +
                      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
                switch (wr->opcode) {
                case IB_WR_SEND:
-               case IB_WR_SEND_WITH_IMM:
+               case IB_WR_SEND_WITH_INV:
+                       if (wr->send_flags & IB_SEND_FENCE)
+                               t3_wr_flags |= T3_READ_FENCE_FLAG;
                        t3_wr_opcode = T3_WR_SEND;
                        err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
                        break;
@@ -280,6 +320,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_READ:
+               case IB_WR_RDMA_READ_WITH_INV:
                        t3_wr_opcode = T3_WR_READ;
                        t3_wr_flags = 0; /* T3 reads are always signaled */
                        err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
@@ -289,6 +330,17 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        if (!qhp->wq.oldest_read)
                                qhp->wq.oldest_read = sqp;
                        break;
+               case IB_WR_FAST_REG_MR:
+                       t3_wr_opcode = T3_WR_FASTREG;
+                       err = iwch_build_fastreg(wqe, wr, &t3_wr_flit_cnt,
+                                                &wr_cnt, &qhp->wq);
+                       break;
+               case IB_WR_LOCAL_INV:
+                       if (wr->send_flags & IB_SEND_FENCE)
+                               t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
+                       t3_wr_opcode = T3_WR_INV_STAG;
+                       err = iwch_build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
+                       break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __func__,
                             wr->opcode);
@@ -307,14 +359,15 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
-                              0, t3_wr_flit_cnt);
+                              0, t3_wr_flit_cnt,
+                              (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
                PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
                     __func__, (unsigned long long) wr->wr_id, idx,
                     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
                     sqp->opcode);
                wr = wr->next;
                num_wrs--;
-               ++(qhp->wq.wptr);
+               qhp->wq.wptr += wr_cnt;
                ++(qhp->wq.sq_wptr);
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
@@ -359,7 +412,7 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                        wr->wr_id;
                build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
-                              0, sizeof(struct t3_receive_wr) >> 3);
+                              0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
                PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
                     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
                     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
@@ -419,10 +472,10 @@ int iwch_bind_mw(struct ib_qp *qp,
        sgl.lkey = mw_bind->mr->lkey;
        sgl.length = mw_bind->length;
        wqe->bind.reserved = 0;
-       wqe->bind.type = T3_VA_BASED_TO;
+       wqe->bind.type = TPT_VATO;
 
        /* TBD: check perms */
-       wqe->bind.perms = iwch_ib_to_mwbind_access(mw_bind->mw_access_flags);
+       wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags);
        wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
        wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
        wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
@@ -430,7 +483,7 @@ int iwch_bind_mw(struct ib_qp *qp,
        err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
        if (err) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               return err;
+               return err;
        }
        wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
        sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
@@ -441,10 +494,9 @@ int iwch_bind_mw(struct ib_qp *qp,
        sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
        wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
        wqe->bind.mr_pagesz = page_size;
-       wqe->flit[T3_SQ_COOKIE_FLIT] = mw_bind->wr_id;
        build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
                       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
-                               sizeof(struct t3_bind_mw_wr) >> 3);
+                      sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
        ++(qhp->wq.wptr);
        ++(qhp->wq.sq_wptr);
        spin_unlock_irqrestore(&qhp->lock, flag);
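
To make the two-fragment fastreg layout concrete, here is the slot arithmetic implied by iwch_build_fastreg() above (a reading of the code, not new behavior):

    /*
     * A WQE carries at most 15 payload flits, since flit 15 holds the
     * genbit (see struct t3_genbit in cxio_wr.h).  The first WQE spends
     * 5 flits on the fastreg header, leaving room for
     * T3_MAX_FASTREG_FRAG = 10 PBL addresses; the overflow fragment
     * spends 1 flit on its fw_riwrh header, leaving 14 more.
     * 10 + 14 = 24 = T3_MAX_FASTREG_DEPTH, which is exactly what
     * iwch_query_device() reports in max_fast_reg_page_list_len.
     */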