dmaengine: add DMA_COMPL_SKIP_{SRC,DEST}_UNMAP flags to control dma unmap
author    Dan Williams <dan.j.williams@intel.com>
          Fri, 4 Jul 2008 07:13:40 +0000 (00:13 -0700)
committer Dan Williams <dan.j.williams@intel.com>
          Tue, 8 Jul 2008 18:59:12 +0000 (11:59 -0700)
In some cases client code may need the DMA driver to skip the unmap of the
source and/or destination buffers.  Setting these flags tells the driver to
skip the corresponding unmap step.  In this regard async_xor is currently
broken: it allows the destination buffer to be unmapped while an operation
is still in progress, i.e. when the number of sources exceeds the hardware
channel's maximum (fixed in a subsequent patch).

Acked-by: Saeed Bishara <saeed@marvell.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Acked-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/dma/ioat_dma.c
drivers/dma/iop-adma.c
drivers/dma/mv_xor.c
include/linux/dmaengine.h
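
For context, a minimal client-side sketch of how these flags are meant to be
used (hypothetical code, not part of this patch; dev, chan, src_buf, dst_buf
and len are assumed to exist): the client keeps ownership of its own DMA
mappings by setting both skip flags on the prepared descriptor, and performs
the unmap itself once the copy has completed.

        /* hypothetical client sketch -- not taken from this patch */
        struct dma_async_tx_descriptor *tx;
        enum dma_ctrl_flags flags = DMA_CTRL_ACK |
                                    DMA_COMPL_SKIP_SRC_UNMAP |
                                    DMA_COMPL_SKIP_DEST_UNMAP;
        dma_addr_t dma_src, dma_dst;
        dma_cookie_t cookie;

        /* the client maps the buffers itself ... */
        dma_src = dma_map_single(dev, src_buf, len, DMA_TO_DEVICE);
        dma_dst = dma_map_single(dev, dst_buf, len, DMA_FROM_DEVICE);

        tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src,
                                                   len, flags);
        if (tx) {
                cookie = tx->tx_submit(tx);
                dma_async_memcpy_issue_pending(chan);
                /* ... wait for completion, e.g. poll
                 * dma_async_memcpy_complete(chan, cookie, NULL, NULL) ...
                 */
        }

        /* ... and unmaps them itself; the driver skips both unmaps */
        dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
        dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);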

diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 90e5b0a28cbf7bd01b078bc994c095778e75a4cc..171cad69f318aff337eda68a2892fbdcbba931b2 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -757,6 +757,27 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
               chan->reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
+static void
+ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+{
+       /*
+        * yes we are unmapping both _page and _single
+        * alloc'd regions with unmap_page. Is this
+        * *really* that bad?
+        */
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
+               pci_unmap_page(ioat_chan->device->pdev,
+                               pci_unmap_addr(desc, dst),
+                               pci_unmap_len(desc, len),
+                               PCI_DMA_FROMDEVICE);
+
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
+               pci_unmap_page(ioat_chan->device->pdev,
+                               pci_unmap_addr(desc, src),
+                               pci_unmap_len(desc, len),
+                               PCI_DMA_TODEVICE);
+}
+
 /**
  * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
  * @chan: ioat channel to be cleaned up
@@ -817,21 +838,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
                         */
                        if (desc->async_tx.cookie) {
                                cookie = desc->async_tx.cookie;
-
-                               /*
-                                * yes we are unmapping both _page and _single
-                                * alloc'd regions with unmap_page. Is this
-                                * *really* that bad?
-                                */
-                               pci_unmap_page(ioat_chan->device->pdev,
-                                               pci_unmap_addr(desc, dst),
-                                               pci_unmap_len(desc, len),
-                                               PCI_DMA_FROMDEVICE);
-                               pci_unmap_page(ioat_chan->device->pdev,
-                                               pci_unmap_addr(desc, src),
-                                               pci_unmap_len(desc, len),
-                                               PCI_DMA_TODEVICE);
-
+                               ioat_dma_unmap(ioat_chan, desc);
                                if (desc->async_tx.callback) {
                                        desc->async_tx.callback(desc->async_tx.callback_param);
                                        desc->async_tx.callback = NULL;
@@ -890,16 +897,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
                                if (desc->async_tx.cookie) {
                                        cookie = desc->async_tx.cookie;
                                        desc->async_tx.cookie = 0;
-
-                                       pci_unmap_page(ioat_chan->device->pdev,
-                                                     pci_unmap_addr(desc, dst),
-                                                     pci_unmap_len(desc, len),
-                                                     PCI_DMA_FROMDEVICE);
-                                       pci_unmap_page(ioat_chan->device->pdev,
-                                                     pci_unmap_addr(desc, src),
-                                                     pci_unmap_len(desc, len),
-                                                     PCI_DMA_TODEVICE);
-
+                                       ioat_dma_unmap(ioat_chan, desc);
                                        if (desc->async_tx.callback) {
                                                desc->async_tx.callback(desc->async_tx.callback_param);
                                                desc->async_tx.callback = NULL;
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index b57564dd023275fc71a03044c0a2e6110bc6b72e..434013d4128899503362e171c0bd26881684aa0f 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -82,17 +82,24 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
                        struct device *dev =
                                &iop_chan->device->pdev->dev;
                        u32 len = unmap->unmap_len;
-                       u32 src_cnt = unmap->unmap_src_cnt;
-                       dma_addr_t addr = iop_desc_get_dest_addr(unmap,
-                               iop_chan);
-
-                       dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
-                       while (src_cnt--) {
-                               addr = iop_desc_get_src_addr(unmap,
-                                                       iop_chan,
-                                                       src_cnt);
-                               dma_unmap_page(dev, addr, len,
-                                       DMA_TO_DEVICE);
+                       enum dma_ctrl_flags flags = desc->async_tx.flags;
+                       u32 src_cnt;
+                       dma_addr_t addr;
+
+                       if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+                               addr = iop_desc_get_dest_addr(unmap, iop_chan);
+                               dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+                       }
+
+                       if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+                               src_cnt = unmap->unmap_src_cnt;
+                               while (src_cnt--) {
+                                       addr = iop_desc_get_src_addr(unmap,
+                                                                    iop_chan,
+                                                                    src_cnt);
+                                       dma_unmap_page(dev, addr, len,
+                                                      DMA_TO_DEVICE);
+                               }
                        }
                        desc->group_head = NULL;
                }
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 8239cfdbc2e67422a0c70a78f00ee3f62102d458..a4e4494663bf9ce4d6f06b0002add3b7cc28c794 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -308,13 +308,23 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
                        struct device *dev =
                                &mv_chan->device->pdev->dev;
                        u32 len = unmap->unmap_len;
-                       u32 src_cnt = unmap->unmap_src_cnt;
-                       dma_addr_t addr = mv_desc_get_dest_addr(unmap);
+                       enum dma_ctrl_flags flags = desc->async_tx.flags;
+                       u32 src_cnt;
+                       dma_addr_t addr;
 
-                       dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
-                       while (src_cnt--) {
-                               addr = mv_desc_get_src_addr(unmap, src_cnt);
-                               dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
+                       if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+                               addr = mv_desc_get_dest_addr(unmap);
+                               dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+                       }
+
+                       if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+                               src_cnt = unmap->unmap_src_cnt;
+                               while (src_cnt--) {
+                                       addr = mv_desc_get_src_addr(unmap,
+                                                                   src_cnt);
+                                       dma_unmap_page(dev, addr, len,
+                                                      DMA_TO_DEVICE);
+                               }
                        }
                        desc->group_head = NULL;
                }
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index ba89b0f5056e260004a41a72a12d9b22bd94bf06..b058d6360383f368925daf965adb191a1316c12d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -102,10 +102,14 @@ enum dma_transaction_type {
  * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
  *     acknowledges receipt, i.e. has has a chance to establish any
  *     dependency chains
+ * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
+ * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
  */
 enum dma_ctrl_flags {
        DMA_PREP_INTERRUPT = (1 << 0),
        DMA_CTRL_ACK = (1 << 1),
+       DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
+       DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
 };
 
 /**
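
For reference, the completion-path change common to the three drivers above
boils down to the following pattern (a simplified sketch rather than a literal
excerpt; dev, len, dest_addr, src_addr[] and src_cnt stand in for the
per-driver descriptor fields):

        /* simplified sketch of the shared completion-path pattern */
        enum dma_ctrl_flags flags = desc->async_tx.flags;

        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
                dma_unmap_page(dev, dest_addr, len, DMA_FROM_DEVICE);

        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
                while (src_cnt--)
                        dma_unmap_page(dev, src_addr[src_cnt], len,
                                       DMA_TO_DEVICE);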