IB: expand ib_umem_get() prototype
author		Arthur Kepner <akepner@sgi.com>
		Tue, 29 Apr 2008 08:00:34 +0000 (01:00 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 29 Apr 2008 15:06:12 +0000 (08:06 -0700)
Add a new parameter, dmasync, to the ib_umem_get() prototype.  Use dmasync = 1
when mapping user-allocated CQs with ib_umem_get().

Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
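
(For context, the call-site change looks like the sketch below. The dmasync = 1
example mirrors the mlx4 CQ hunk in this patch; every other caller passes 0 to
keep the old behavior.)

	/* Before this patch: no way to request a DMA write barrier. */
	umem = ib_umem_get(context, buf_addr, size, IB_ACCESS_LOCAL_WRITE);

	/* After: passing dmasync = 1 asks for in-flight DMA to be flushed
	 * when the region is written, as user-allocated CQs need. */
	umem = ib_umem_get(context, buf_addr, size, IB_ACCESS_LOCAL_WRITE, 1);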
15 files changed:
drivers/infiniband/core/umem.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/ehca/ehca_mrmw.c
drivers/infiniband/hw/ipath/ipath_mr.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/doorbell.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/srq.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/mthca/mthca_user.h
drivers/infiniband/hw/nes/nes_verbs.c
include/rdma/ib_umem.h
include/rdma/ib_verbs.h

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 4e3128ff73c135ce988cdbb9d75fd58981faa26b..fe78f7d250991d120adcbf6c93c50b0ee38078c7 100644
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
 
 #include "uverbs.h"
 
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
+ * @dmasync: flush in-flight DMA when the memory region is written
  */
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                           size_t size, int access)
+                           size_t size, int access, int dmasync)
 {
        struct ib_umem *umem;
        struct page **page_list;
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        int ret;
        int off;
        int i;
+       DEFINE_DMA_ATTRS(attrs);
+
+       if (dmasync)
+               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                                sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }
 
-                       chunk->nmap = ib_dma_map_sg(context->device,
-                                                   &chunk->page_list[0],
-                                                   chunk->nents,
-                                                   DMA_BIDIRECTIONAL);
+                       chunk->nmap = ib_dma_map_sg_attrs(context->device,
+                                                         &chunk->page_list[0],
+                                                         chunk->nents,
+                                                         DMA_BIDIRECTIONAL,
+                                                         &attrs);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(sg_page(&chunk->page_list[i]));
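
(The hunk above is the core of the change: the dmasync flag is translated into
a DMA attribute and handed to the attrs-aware mapping call. As a minimal sketch
of that pattern in isolation, assuming the 2008-era struct dma_attrs interface
added alongside this series:)

	#include <linux/dma-attrs.h>
	#include <linux/dma-mapping.h>

	/* Map a scatterlist, optionally requesting a write barrier. */
	static int map_with_optional_barrier(struct device *dev,
					     struct scatterlist *sg,
					     int nents, int dmasync)
	{
		DEFINE_DMA_ATTRS(attrs);	/* no attributes set initially */

		if (dmasync)
			dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

		/* With no attributes set this is equivalent to dma_map_sg(). */
		return dma_map_sg_attrs(dev, sg, nents, DMA_BIDIRECTIONAL,
					&attrs);
	}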
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 6af2c0f79a677ab889685e4b54d6ed53c580850d..2acf9b62cf9936acf72f25f2c15bc8bdee41493d 100644
@@ -452,7 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                return ERR_PTR(-ENOMEM);
        c2mr->pd = c2pd;
 
-       c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+       c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(c2mr->umem)) {
                err = PTR_ERR(c2mr->umem);
                kfree(c2mr);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ab4695c1dd5614b3e881ad6d03abca913621ebf6..e343e9e64844fb5c7bfa27178ed3fe6ef35bdf5b 100644
@@ -602,7 +602,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (!mhp)
                return ERR_PTR(-ENOMEM);
 
-       mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+       mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
                kfree(mhp);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 46ae4eb2c4e1e096a4bce1bbef180474d64ec335..f974367cad4007475a314237768c358d50a236ac 100644
@@ -323,7 +323,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        }
 
        e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
-                                mr_access_flags);
+                                mr_access_flags, 0);
        if (IS_ERR(e_mr->umem)) {
                ib_mr = (void *)e_mr->umem;
                goto reg_user_mr_exit1;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index db4ba92f79fcd60736a8bd3d1e1752caa0ab3f5d..9d343b7c2f3b64b7729114a98773accab45baec8 100644
@@ -195,7 +195,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                goto bail;
        }
 
-       umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags);
+       umem = ib_umem_get(pd->uobject->context, start, length,
+                          mr_access_flags, 0);
        if (IS_ERR(umem))
                return (void *) umem;
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5e570bb0bb6f57fe6912bce4485b8cde60d90658..e3dddfc687f9044cfd377c75b7ec70dfd1921a80 100644
@@ -137,7 +137,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
        int err;
 
        *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
-                           IB_ACCESS_LOCAL_WRITE);
+                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);
 
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 8e342cc9baec95430033bf2019547c7a4d90d4e6..8aee4233b388e2938036567e43f78fa425cf0e30 100644
@@ -63,7 +63,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
        page->user_virt = (virt & PAGE_MASK);
        page->refcnt    = 0;
        page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
-                                     PAGE_SIZE, 0);
+                                     PAGE_SIZE, 0, 0);
        if (IS_ERR(page->umem)) {
                err = PTR_ERR(page->umem);
                kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index fe2c2e94a5f87031de3c68591c299b0c38bf353f..68e92485fc76f132ea079110691b1c283cd337e0 100644
@@ -132,7 +132,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (!mr)
                return ERR_PTR(-ENOMEM);
 
-       mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags);
+       mr->umem = ib_umem_get(pd->uobject->context, start, length,
+                              access_flags, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 80ea8b9e7761656cde75df7de58e33a50f3afdf3..8e02ecfec18872c4d8f5585cd01cf49c5c8b91b6 100644
@@ -482,7 +482,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        goto err;
 
                qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-                                      qp->buf_size, 0);
+                                      qp->buf_size, 0, 0);
                if (IS_ERR(qp->umem)) {
                        err = PTR_ERR(qp->umem);
                        goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 204619702f9d7d9401bf5dd65012fbf2e0062de6..12d6bc6f8007da0ced4158b1a956c7da52bf56c0 100644
@@ -109,7 +109,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                }
 
                srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-                                       buf_size, 0);
+                                       buf_size, 0, 0);
                if (IS_ERR(srq->umem)) {
                        err = PTR_ERR(srq->umem);
                        goto err_srq;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 696e1f30233268a6f69261ca10dd4fb0beadef28..2a9f460cf06137665d0cfee18b2e15660db192a6 100644
@@ -1006,17 +1006,23 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
+       struct mthca_reg_mr ucmd;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        int write_mtt_size;
 
+       if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+               return ERR_PTR(-EFAULT);
+
        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);
 
-       mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+       mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
+                              ucmd.mr_attrs & MTHCA_MR_DMASYNC);
+
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err;
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 02cc0a766f3ac767db1aba8333816ec58b402775..f8cb3b664d377b37b0955347cfdbc4c32bcd8e14 100644
@@ -41,7 +41,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define MTHCA_UVERBS_ABI_VERSION       1
+#define MTHCA_UVERBS_ABI_VERSION       2
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -61,6 +61,14 @@ struct mthca_alloc_pd_resp {
        __u32 reserved;
 };
 
+struct mthca_reg_mr {
+       __u32 mr_attrs;
+#define MTHCA_MR_DMASYNC 0x1
+/* mark the memory region with a DMA attribute that causes
+ * in-flight DMA to be flushed when the region is written to */
+       __u32 reserved;
+};
+
 struct mthca_create_cq {
        __u32 lkey;
        __u32 pdn;
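
(Userspace opts in through this struct, passed as the driver-private payload of
the register-MR command and read back via ib_copy_from_udata() in
mthca_reg_user_mr() above. A hypothetical sketch of the provider-library side;
the want_dmasync flag is illustrative, not taken from libmthca:)

	struct mthca_reg_mr cmd;	/* layout as defined above */

	cmd.mr_attrs = want_dmasync ? MTHCA_MR_DMASYNC : 0;
	cmd.reserved = 0;
	/* cmd is appended to the uverbs REG_MR request; the bumped ABI
	 * version tells the kernel the trailer is present. */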
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index ee74f7c7a6da85b3b163f0f1a5ab9171691705a5..9ae397a0ff7e4d9d88155a1278ee3cc8b41540e7 100644
@@ -2377,7 +2377,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        u8 single_page = 1;
        u8 stag_key;
 
-       region = ib_umem_get(pd->uobject->context, start, length, acc);
+       region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(region)) {
                return (struct ib_mr *)region;
        }
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 22298423cf0b3d0e597e347de0ab376b8bbcaf24..9ee0d2e51b16e5ea04910b5cfff671bd5563aeb8 100644
@@ -62,7 +62,7 @@ struct ib_umem_chunk {
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                           size_t size, int access);
+                           size_t size, int access, int dmasync);
 void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
 
@@ -72,7 +72,7 @@ int ib_umem_page_count(struct ib_umem *umem);
 
 static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
                                          unsigned long addr, size_t size,
-                                         int access) {
+                                         int access, int dmasync) {
        return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 2dcbecce3f61201a13710d104bc69f746f265823..911a661b7278c08ef43d91afaceb1098b2670e15 100644
@@ -1542,6 +1542,24 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
                dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
+static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
+                                         void *cpu_addr, size_t size,
+                                         enum dma_data_direction direction,
+                                         struct dma_attrs *attrs)
+{
+       return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
+                                   direction, attrs);
+}
+
+static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
+                                            u64 addr, size_t size,
+                                            enum dma_data_direction direction,
+                                            struct dma_attrs *attrs)
+{
+       return dma_unmap_single_attrs(dev->dma_device, addr, size,
+                                     direction, attrs);
+}
+
 /**
  * ib_dma_map_page - Map a physical page to DMA address
  * @dev: The device for which the dma_addr is to be created
@@ -1611,6 +1629,21 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+                                     struct scatterlist *sg, int nents,
+                                     enum dma_data_direction direction,
+                                     struct dma_attrs *attrs)
+{
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+                                        struct scatterlist *sg, int nents,
+                                        enum dma_data_direction direction,
+                                        struct dma_attrs *attrs)
+{
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
  * @dev: The device for which the DMA addresses were created
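
(The wrappers above simply forward to the generic dma_*_attrs primitives. A
hedged usage sketch follows; supplying the same attrs at unmap time is the
natural symmetry, though note that this patch's umem release path still uses
plain ib_dma_unmap_sg():)

	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	nmap = ib_dma_map_sg_attrs(dev, sg, nents, DMA_BIDIRECTIONAL, &attrs);
	if (nmap <= 0)
		return -ENOMEM;
	/* ... device DMA into the mapped region ... */
	ib_dma_unmap_sg_attrs(dev, sg, nents, DMA_BIDIRECTIONAL, &attrs);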