dmaengine: provide a common 'issue_pending_all' implementation
author	Dan Williams <dan.j.williams@intel.com>
	Tue, 6 Jan 2009 18:38:14 +0000 (11:38 -0700)
committer	Dan Williams <dan.j.williams@intel.com>
	Tue, 6 Jan 2009 18:38:14 +0000 (11:38 -0700)
async_tx and net_dma each have open-coded versions of issue_pending_all,
so provide a common routine in dmaengine.

The implementation needs to walk the global device list, so that list is
now RCU-protected, allowing dma_issue_pending_all to run locklessly.
Clients protect themselves from channel removal events by holding a
dmaengine reference.
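
A minimal sketch of how a client might use the new entry point (the example
function, its arguments, and dma_async_memcpy_buf_to_buf usage are
illustrative only, not part of this patch), assuming the caller already
holds a dmaengine reference so channels cannot be unregistered underneath it:

	#include <linux/dmaengine.h>

	/* hypothetical client code, not part of this patch */
	static void example_copy_and_flush(struct dma_chan *chan,
					   void *dst, void *src, size_t len)
	{
		dma_cookie_t cookie;

		/* queue an async memcpy on this channel; hardware is not kicked yet */
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
		if (cookie < 0)
			return;

		/*
		 * A single call flushes pending operations on every channel of
		 * every registered device, replacing the open-coded loops that
		 * async_tx and net_dma used to carry.
		 */
		dma_issue_pending_all();
	}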

Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
crypto/async_tx/async_tx.c
drivers/dma/dmaengine.c
include/linux/async_tx.h
include/linux/dmaengine.h
net/core/dev.c

diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index b88bb1f608fc09d932d4497f9733ae330dff2b35..2cdf7a0867b7a1c0de573168a1d578646c4c2b84 100644 (file)
@@ -45,18 +45,6 @@ static DEFINE_SPINLOCK(async_tx_lock);
 
 static LIST_HEAD(async_tx_master_list);
 
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
-{
-       struct dma_chan_ref *ref;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-               ref->chan->device->device_issue_pending(ref->chan);
-       rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
 static void
 free_dma_chan_ref(struct rcu_head *rcu)
 {
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87a8cd4791ed0adcab1b086db3876702442c5259..418eca28d472baf28909c6b4f30110dcef8af61e 100644 (file)
@@ -70,6 +70,7 @@
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 #include <linux/jiffies.h>
+#include <linux/rculist.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -365,6 +366,26 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
+{
+       struct dma_device *device;
+       struct dma_chan *chan;
+
+       WARN_ONCE(dmaengine_ref_count == 0,
+                 "client called %s without a reference", __func__);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(device, &dma_device_list, global_node)
+               list_for_each_entry(chan, &device->channels, device_node)
+                       if (chan->client_count)
+                               device->device_issue_pending(chan);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(dma_issue_pending_all);
+
 /**
  * nth_chan - returns the nth channel of the given capability
  * @cap: capability to match
@@ -490,7 +511,7 @@ void dma_async_client_register(struct dma_client *client)
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
-                               list_del_init(&device->global_node);
+                               list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
@@ -635,7 +656,7 @@ int dma_async_device_register(struct dma_device *device)
                                goto err_out;
                        }
                }
-       list_add_tail(&device->global_node, &dma_device_list);
+       list_add_tail_rcu(&device->global_node, &dma_device_list);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
 
@@ -677,7 +698,7 @@ void dma_async_device_unregister(struct dma_device *device)
        struct dma_chan *chan;
 
        mutex_lock(&dma_list_mutex);
-       list_del(&device->global_node);
+       list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
 
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 1c816775f1352fc67ebbce00bdd7d27ae4465a54..45f6297821bd6c7afee48e247d8a25b027bc7445 100644 (file)
@@ -59,7 +59,7 @@ enum async_tx_flags {
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b466f02e2433dd3b01d2d1e7e3b76e46d7f2c2e3..57a43adfc39ee35de82a9ecae8df54de6e5061bd 100644 (file)
@@ -471,6 +471,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
 
 /* --- Helper iov-locking functions --- */
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 09c66a449da6c7c72c1bca4bfb8e392281d84bbb..e40b0d57f8ff8040dd7101cafd5ec7e7dd80d2b5 100644 (file)
@@ -2635,14 +2635,7 @@ out:
         * There may not be any more sk_buffs coming right now, so push
         * any pending DMA copies to hardware
         */
-       if (!cpus_empty(net_dma.channel_mask)) {
-               int chan_idx;
-               for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-                       struct dma_chan *chan = net_dma.channels[chan_idx];
-                       if (chan)
-                               dma_async_memcpy_issue_pending(chan);
-               }
-       }
+       dma_issue_pending_all();
 #endif
 
        return;