IB/mlx4: Add IPoIB LSO support
author Eli Cohen <eli@dev.mellanox.co.il>
Thu, 17 Apr 2008 04:09:27 +0000 (21:09 -0700)
committer Roland Dreier <rolandd@cisco.com>
Thu, 17 Apr 2008 04:09:27 +0000 (21:09 -0700)
Add TSO support to the mlx4_ib driver.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/net/mlx4/fw.c
drivers/net/mlx4/fw.h
drivers/net/mlx4/main.c
include/linux/mlx4/device.h
include/linux/mlx4/qp.h

diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d2e32b03e2f75910bfef1a71ba5448a9712b4aa7..7d70af7952b01a8c9e28f0d07a8c4ac7de8854e5 100644
@@ -420,6 +420,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                case MLX4_OPCODE_BIND_MW:
                        wc->opcode    = IB_WC_BIND_MW;
                        break;
+               case MLX4_OPCODE_LSO:
+                       wc->opcode    = IB_WC_LSO;
+                       break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);
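
On the completion path, send WQEs posted with the new LSO opcode are reported back with their own work-completion opcode. A small illustrative sketch, not part of this patch, of how a consumer polling the send CQ might tell them apart ("wc" is one struct ib_wc returned by ib_poll_cq()):

switch (wc->opcode) {
case IB_WC_SEND:
        /* ordinary send finished */
        break;
case IB_WC_LSO:
        /* the adapter finished segmenting and sending the large frame */
        break;
default:
        break;
}
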
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 6ea4746c2e9b209bf79426a2cae41104378dcafa..e9330a0d6c037ffbd45511e454994aaaf7cd780a 100644
@@ -101,6 +101,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+       if (dev->dev->caps.max_gso_sz)
+               props->device_cap_flags |= IB_DEVICE_UD_TSO;
 
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
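
The new capability bit lets consumers discover TSO support before relying on it. A minimal sketch, not part of this patch, of a kernel ULP checking the bit; the function name and the "hca" parameter are illustrative:

#include <rdma/ib_verbs.h>

static int ud_tso_supported(struct ib_device *hca)
{
        struct ib_device_attr attr;

        if (ib_query_device(hca, &attr))
                return 0;       /* query failed; assume no TSO */

        return !!(attr.device_cap_flags & IB_DEVICE_UD_TSO);
}
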
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 3726e451a327201d037016c4c40173932710d126..3f8bd0a37b9653511673b75d16b11acdc55be9c1 100644
@@ -110,6 +110,10 @@ struct mlx4_ib_wq {
        unsigned                tail;
 };
 
+enum mlx4_ib_qp_flags {
+       MLX4_IB_QP_LSO          = 1 << 0
+};
+
 struct mlx4_ib_qp {
        struct ib_qp            ibqp;
        struct mlx4_qp          mqp;
@@ -129,6 +133,7 @@ struct mlx4_ib_qp {
        struct mlx4_mtt         mtt;
        int                     buf_size;
        struct mutex            mutex;
+       u32                     flags;
        u8                      port;
        u8                      alt_port;
        u8                      atomic_rd_en;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 320c25fa74b1638ae92033cd0a77a173a90691fe..2ba24308408938f84f20b76fee4fa309bf25e47a 100644
@@ -71,6 +71,7 @@ enum {
 
 static const __be32 mlx4_ib_opcode[] = {
        [IB_WR_SEND]                    = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+       [IB_WR_LSO]                     = __constant_cpu_to_be32(MLX4_OPCODE_LSO),
        [IB_WR_SEND_WITH_IMM]           = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
        [IB_WR_RDMA_WRITE]              = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
        [IB_WR_RDMA_WRITE_WITH_IMM]     = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -242,7 +243,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
        }
 }
 
-static int send_wqe_overhead(enum ib_qp_type type)
+static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
 {
        /*
         * UD WQEs must have a datagram segment.
@@ -253,7 +254,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
        switch (type) {
        case IB_QPT_UD:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
-                       sizeof (struct mlx4_wqe_datagram_seg);
+                       sizeof (struct mlx4_wqe_datagram_seg) +
+                       ((flags & MLX4_IB_QP_LSO) ? 64 : 0);
        case IB_QPT_UC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
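
The extra 64 bytes reserved for LSO QPs hold the inline LSO header that build_lso_seg() (added further down) copies into the WQE. A worked example of the resulting per-WQE overhead, using the mlx4 segment sizes of this driver generation (treat the exact byte counts as illustrative rather than guaranteed):

/*
 * For a UD QP created with MLX4_IB_QP_LSO:
 *
 *   sizeof(struct mlx4_wqe_ctrl_seg)      = 16 bytes
 *   sizeof(struct mlx4_wqe_datagram_seg)  = 48 bytes
 *   space reserved for the LSO header     = 64 bytes
 *   ------------------------------------------------
 *   send_wqe_overhead(IB_QPT_UD, flags)   = 128 bytes
 *
 * versus 16 + 48 = 64 bytes for a plain UD QP.
 */
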
@@ -315,7 +317,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
        /* Sanity check SQ size before proceeding */
        if (cap->max_send_wr     > dev->dev->caps.max_wqes  ||
            cap->max_send_sge    > dev->dev->caps.max_sq_sg ||
-           cap->max_inline_data + send_wqe_overhead(type) +
+           cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
            sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
                return -EINVAL;
 
@@ -329,7 +331,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 
        s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
                cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
-               send_wqe_overhead(type);
+               send_wqe_overhead(type, qp->flags);
 
        /*
         * Hermon supports shrinking WQEs, such that a single work
@@ -394,7 +396,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
        }
 
        qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
-                        send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
+                        send_wqe_overhead(type, qp->flags)) /
+               sizeof (struct mlx4_wqe_data_seg);
 
        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
                (qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -503,6 +506,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        } else {
                qp->sq_no_prefetch = 0;
 
+               if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+                       qp->flags |= MLX4_IB_QP_LSO;
+
                err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
                if (err)
                        goto err;
@@ -673,7 +679,11 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
        struct mlx4_ib_qp *qp;
        int err;
 
-       if (init_attr->create_flags)
+       /* We only support LSO, and only for kernel UD QPs. */
+       if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
+               return ERR_PTR(-EINVAL);
+       if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
+           (pd->uobject || init_attr->qp_type != IB_QPT_UD))
                return ERR_PTR(-EINVAL);
 
        switch (init_attr->qp_type) {
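
The driver accepts the LSO create flag only for UD QPs created from the kernel. An illustrative sketch, not part of this patch, of how a ULP would request such a QP; pd, send_cq and recv_cq come from the caller and the queue capacities are placeholders:

#include <rdma/ib_verbs.h>

static struct ib_qp *create_lso_ud_qp(struct ib_pd *pd,
                                      struct ib_cq *send_cq,
                                      struct ib_cq *recv_cq)
{
        struct ib_qp_init_attr init_attr = {
                .send_cq      = send_cq,
                .recv_cq      = recv_cq,
                .cap          = {
                        .max_send_wr  = 64,     /* placeholder sizes */
                        .max_recv_wr  = 64,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .sq_sig_type  = IB_SIGNAL_ALL_WR,
                .qp_type      = IB_QPT_UD,
                .create_flags = IB_QP_CREATE_IPOIB_UD_LSO,
        };

        /* returns ERR_PTR(-EINVAL) if the device or QP type rejects the flag */
        return ib_create_qp(pd, &init_attr);
}
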
@@ -879,10 +889,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                }
        }
 
-       if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-           ibqp->qp_type == IB_QPT_UD)
+       if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
                context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
-       else if (attr_mask & IB_QP_PATH_MTU) {
+       else if (ibqp->qp_type == IB_QPT_UD) {
+               if (qp->flags & MLX4_IB_QP_LSO)
+                       context->mtu_msgmax = (IB_MTU_4096 << 5) |
+                                             ilog2(dev->dev->caps.max_gso_sz);
+               else
+                       context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+       } else if (attr_mask & IB_QP_PATH_MTU) {
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
                        printk(KERN_ERR "path MTU (%u) is invalid\n",
                               attr->path_mtu);
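
The low bits of mtu_msgmax carry the log2 of the largest message the QP may send, so an LSO-enabled UD QP advertises the device's maximum GSO size instead of the usual 2 KB. A worked example with a hypothetical max_gso_sz value:

/*
 * Illustrative encoding (the max_gso_sz value is hypothetical):
 *
 *   max_gso_sz          = 32768 = 1 << 15
 *   ilog2(max_gso_sz)   = 15
 *   context->mtu_msgmax = (IB_MTU_4096 << 5) | 15
 *
 * A non-LSO UD QP keeps the previous value, (IB_MTU_4096 << 5) | 11,
 * i.e. a 2^11 = 2048 byte maximum message.
 */
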
@@ -1399,6 +1414,34 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
        dseg->addr       = cpu_to_be64(sg->addr);
 }
 
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+                        struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
+{
+       unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+
+       /*
+        * This is a temporary limitation and will be removed in
+        * a forthcoming FW release:
+        */
+       if (unlikely(halign > 64))
+               return -EINVAL;
+
+       if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
+                    wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+               return -EINVAL;
+
+       memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+       /* make sure LSO header is written before overwriting stamping */
+       wmb();
+
+       wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+                                       wr->wr.ud.hlen);
+
+       *lso_seg_len = halign;
+       return 0;
+}
+
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr)
 {
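
To make the arithmetic in build_lso_seg() above concrete, a worked example with hypothetical wr->wr.ud values (a 58-byte header and an mss of 2106; both numbers are only for illustration):

/*
 *   hlen   = 58, mss = 2106
 *
 *   halign = ALIGN(sizeof(struct mlx4_lso_seg) + 58, 16)
 *          = ALIGN(4 + 58, 16) = 64     (just fits the temporary 64-byte cap)
 *
 *   mss_hdr_size = cpu_to_be32((2106 - 58) << 16 | 58)
 *                  -> 2048 (mss - hlen) in the high 16 bits,
 *                     58 (the header length) in the low 16 bits
 */
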
@@ -1412,6 +1455,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        unsigned ind;
        int uninitialized_var(stamp);
        int uninitialized_var(size);
+       unsigned seglen;
        int i;
 
        spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1490,6 +1534,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        set_datagram_seg(wqe, wr);
                        wqe  += sizeof (struct mlx4_wqe_datagram_seg);
                        size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+                       if (wr->opcode == IB_WR_LSO) {
+                               err = build_lso_seg(wqe, wr, qp, &seglen);
+                               if (unlikely(err)) {
+                                       *bad_wr = wr;
+                                       goto out;
+                               }
+                               wqe  += seglen;
+                               size += seglen / 16;
+                       }
                        break;
 
                case IB_QPT_SMI:
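
With the LSO segment placed after the datagram segment, a single UD work request can describe a payload larger than the path MTU together with the header the adapter prepends to each segment. An illustrative sketch, not part of this patch, of a ULP posting such a request; the address handle, remote QPN/Q_Key, header buffer, DMA-mapped payload and mss value are all assumed to come from the caller:

#include <rdma/ib_verbs.h>

static int post_lso_send(struct ib_qp *qp, struct ib_ah *ah,
                         u32 remote_qpn, u32 remote_qkey,
                         void *header, int hlen, int mss,
                         u64 payload_addr, u32 payload_len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = payload_addr,
                .length = payload_len,          /* may exceed the path MTU */
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .opcode     = IB_WR_LSO,
                .send_flags = IB_SEND_SIGNALED,
                .sg_list    = &sge,
                .num_sge    = 1,
        };
        struct ib_send_wr *bad_wr;

        wr.wr.ud.ah          = ah;
        wr.wr.ud.remote_qpn  = remote_qpn;
        wr.wr.ud.remote_qkey = remote_qkey;
        wr.wr.ud.header      = header;  /* copied inline by build_lso_seg() */
        wr.wr.ud.hlen        = hlen;
        wr.wr.ud.mss         = mss;

        return ib_post_send(qp, &wr, &bad_wr);
}

The matching completion is then reported with wc->opcode == IB_WC_LSO, as handled in the cq.c hunk above.
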
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index f494c3e8bce33357ba6fd13cb16f73207affff12..d82f2751d2c713e4a7f97579a9881b0d27a16129 100644
@@ -133,6 +133,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_AV_OFFSET            0x27
 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET                0x29
 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET                0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET           0x2d
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET          0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET           0x33
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET         0x35
@@ -215,6 +216,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
        dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+       field &= 0x1f;
+       if (!field)
+               dev_cap->max_gso_sz = 0;
+       else
+               dev_cap->max_gso_sz = 1 << field;
+
        MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
        dev_cap->max_rdma_global = 1 << (field & 0x3f);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
@@ -377,6 +385,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
        mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
                 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+       mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 
        dump_dev_cap_flags(dev, dev_cap->flags);
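
The new QUERY_DEV_CAP byte is decoded as a power of two, with zero meaning no GSO support. A worked example with a hypothetical raw value:

/*
 *   field & 0x1f == 0x0e  ->  max_gso_sz = 1 << 14 = 16384 bytes
 *   field & 0x1f == 0x00  ->  max_gso_sz = 0 (no GSO; mlx4_ib will then
 *                             not advertise IB_DEVICE_UD_TSO)
 */
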
 
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index e16dec890413dddbfcce48e7b79686f068703c8c..306cb9b0242d654bda6ba16ba8cc0c728e589af9 100644
@@ -96,6 +96,7 @@ struct mlx4_dev_cap {
        u8  bmme_flags;
        u32 reserved_lkey;
        u64 max_icm_sz;
+       int max_gso_sz;
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 08bfc130a33eb7d63dac009d338e4464a34b4499..7cfbe75114d1de19f2cf214d53f86cac1f6a53a4 100644
@@ -159,6 +159,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags              = dev_cap->flags;
        dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
+       dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
 
        return 0;
 }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6cdf813cd47883b5c38b674003ea650c3a532709..ff7df1a2222fea4d5d5056280a0fba76eb84aab9 100644
@@ -186,6 +186,7 @@ struct mlx4_caps {
        u32                     flags;
        u16                     stat_rate_support;
        u8                      port_width_cap[MLX4_MAX_PORTS + 1];
+       int                     max_gso_sz;
 };
 
 struct mlx4_buf_list {
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 31f9eb3ccbb376ddf07fd4dbbc237b1f434c2c5a..a5e43febee4fa550a6a4a9d145ed0c893acd76f8 100644
@@ -219,6 +219,11 @@ struct mlx4_wqe_datagram_seg {
        __be32                  reservd[2];
 };
 
+struct mlx4_lso_seg {
+       __be32                  mss_hdr_size;
+       __be32                  header[0];
+};
+
 struct mlx4_wqe_bind_seg {
        __be32                  flags1;
        __be32                  flags2;