Add generic helpers for arch IPI function calls
author     Jens Axboe <jens.axboe@oracle.com>
           Thu, 26 Jun 2008 09:21:34 +0000 (11:21 +0200)
committer  Jens Axboe <jens.axboe@oracle.com>
           Thu, 26 Jun 2008 09:21:34 +0000 (11:21 +0200)
This adds kernel/smp.c, which contains helpers for IPI function calls. In
addition to supporting the existing smp_call_function() in a more efficient
manner, it also adds a more scalable variant, smp_call_function_single(),
for calling a given function on a single CPU only.
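
For illustration, a caller-side sketch of how the two variants are meant
to be used (hypothetical code, not part of this patch; drop_cpu_cache()
and the cache_gen counter are made-up names):

	#include <linux/percpu.h>
	#include <linux/smp.h>

	static DEFINE_PER_CPU(unsigned long, cache_gen);

	/* Runs in IPI context on each target CPU: it must be fast,
	 * non-blocking, and must not take sleeping locks. */
	static void drop_cpu_cache(void *info)
	{
		__get_cpu_var(cache_gen)++;
	}

	static void invalidate_caches(void)
	{
		/* Run on CPU 1 only, waiting for completion. Interrupts
		 * must be enabled here, or the wait can deadlock. */
		smp_call_function_single(1, drop_cpu_cache, NULL, 0, 1);

		/* Fire-and-forget on every other online CPU; preemption
		 * must be disabled around smp_call_function_mask(). */
		preempt_disable();
		smp_call_function_mask(cpu_online_map, drop_cpu_cache,
					NULL, 0);
		preempt_enable();
	}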

The core of this is based on the x86-64 patch from Nick Piggin, with many
changes made since then. "Alan D. Brunelle" <Alan.Brunelle@hp.com>
contributed many fixes and suggestions as well. Thanks also to
Paul E. McKenney <paulmck@linux.vnet.ibm.com> for reviewing the RCU usage
and for getting rid of the data allocation fallback deadlock.

Acked-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
arch/Kconfig
arch/sparc64/kernel/smp.c
include/linux/smp.h
init/main.c
kernel/Makefile
kernel/smp.c [new file with mode: 0644]

diff --git a/arch/Kconfig b/arch/Kconfig
index 3ea332b009e511f785349c725e716052d42877ef..ad89a33d8c6ef66acb166e49fa423659e18afc9c 100644
@@ -39,3 +39,6 @@ config HAVE_KRETPROBES
 
 config HAVE_DMA_ATTRS
        def_bool n
+
+config USE_GENERIC_SMP_HELPERS
+       def_bool n
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index fa63c68a181941736a7809553d41538a1c5ab074..b82d017a17448dfe965580ea6c95f31cbb911bd5 100644
@@ -816,8 +816,9 @@ extern unsigned long xcall_call_function;
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-static int smp_call_function_mask(void (*func)(void *info), void *info,
-                                 int nonatomic, int wait, cpumask_t mask)
+static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
+                                         int nonatomic, int wait,
+                                         cpumask_t mask)
 {
        struct call_data_struct data;
        int cpus;
@@ -855,8 +856,8 @@ out_unlock:
 int smp_call_function(void (*func)(void *info), void *info,
                      int nonatomic, int wait)
 {
-       return smp_call_function_mask(func, info, nonatomic, wait,
-                                     cpu_online_map);
+       return sparc64_smp_call_function_mask(func, info, nonatomic, wait,
+                                               cpu_online_map);
 }
 
 void smp_call_function_client(int irq, struct pt_regs *regs)
@@ -893,7 +894,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-       smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+       sparc64_smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232ccf9cfdbb020765e89f99b22c7fc9a35af2..eac3e062250f7e24e98ac6355daa5ef280b98dda 100644
@@ -7,9 +7,19 @@
  */
 
 #include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
 
 extern void cpu_idle(void);
 
+struct call_single_data {
+       struct list_head list;
+       void (*func) (void *info);
+       void *info;
+       unsigned int flags;
+};
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -53,9 +63,28 @@ extern void smp_cpus_done(unsigned int max_cpus);
  * Call a function on all other processors
  */
 int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+                               int wait);
 int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
                                int retry, int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void ipi_call_lock(void);
+void ipi_call_unlock(void);
+void ipi_call_lock_irq(void);
+void ipi_call_unlock_irq(void);
+#else
+static inline void init_call_single_data(void)
+{
+}
+#endif
 
 /*
  * Call a function on all processors
@@ -112,7 +141,9 @@ static inline void smp_send_reschedule(int cpu) { }
 })
 #define smp_call_function_mask(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
-
+static inline void init_call_single_data(void)
+{
+}
 #endif /* !SMP */
 
 /*
diff --git a/init/main.c b/init/main.c
index f7fb20021d48d5a4a175cc79d84f6b3cfb9eab53..1efcccff1bdbcbdc760b56e7c5d6cfcc5b221da4 100644
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/start_kernel.h>
 #include <linux/security.h>
+#include <linux/smp.h>
 #include <linux/workqueue.h>
 #include <linux/profile.h>
 #include <linux/rcupdate.h>
@@ -779,6 +780,7 @@ static void __init do_pre_smp_initcalls(void)
 {
        extern int spawn_ksoftirqd(void);
 
+       init_call_single_data();
        migration_init();
        spawn_ksoftirqd();
        if (!nosoftlockup)
diff --git a/kernel/Makefile b/kernel/Makefile
index 1c9938addb9d9bc7af3acb74b0a3090acbb8d466..9fa57976f252fe5604047f39981b0dbf90be357a 100644
@@ -28,6 +28,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
diff --git a/kernel/smp.c b/kernel/smp.c
new file mode 100644
index 0000000..f77b75c
--- /dev/null
+++ b/kernel/smp.c
@@ -0,0 +1,383 @@
+/*
+ * Generic helpers for smp ipi calls
+ *
+ * (C) Jens Axboe <jens.axboe@oracle.com> 2008
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+
+static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
+static LIST_HEAD(call_function_queue);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
+
+enum {
+       CSD_FLAG_WAIT           = 0x01,
+       CSD_FLAG_ALLOC          = 0x02,
+};
+
+struct call_function_data {
+       struct call_single_data csd;
+       spinlock_t lock;
+       unsigned int refs;
+       cpumask_t cpumask;
+       struct rcu_head rcu_head;
+};
+
+struct call_single_queue {
+       struct list_head list;
+       spinlock_t lock;
+};
+
+void __cpuinit init_call_single_data(void)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               struct call_single_queue *q = &per_cpu(call_single_queue, i);
+
+               spin_lock_init(&q->lock);
+               INIT_LIST_HEAD(&q->list);
+       }
+}
+
+static void csd_flag_wait(struct call_single_data *data)
+{
+       /* Wait for response */
+       do {
+               /*
+                * We need to see the flags store in the IPI handler
+                */
+               smp_mb();
+               if (!(data->flags & CSD_FLAG_WAIT))
+                       break;
+               cpu_relax();
+       } while (1);
+}
+
+/*
+ * Insert a previously allocated call_single_data element for execution
+ * on the given CPU. data must already have ->func, ->info, and ->flags set.
+ */
+static void generic_exec_single(int cpu, struct call_single_data *data)
+{
+       struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
+       int wait = data->flags & CSD_FLAG_WAIT, ipi;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dst->lock, flags);
+       ipi = list_empty(&dst->list);
+       list_add_tail(&data->list, &dst->list);
+       spin_unlock_irqrestore(&dst->lock, flags);
+
+       if (ipi)
+               arch_send_call_function_single_ipi(cpu);
+
+       if (wait)
+               csd_flag_wait(data);
+}
+
+static void rcu_free_call_data(struct rcu_head *head)
+{
+       struct call_function_data *data;
+
+       data = container_of(head, struct call_function_data, rcu_head);
+
+       kfree(data);
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function. Must be called with
+ * interrupts disabled.
+ */
+void generic_smp_call_function_interrupt(void)
+{
+       struct call_function_data *data;
+       int cpu = get_cpu();
+
+       /*
+        * It's ok to use list_for_each_entry_rcu() here even though we may
+        * delete 'data', since list_del_rcu() doesn't clear ->next
+        */
+       rcu_read_lock();
+       list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
+               int refs;
+
+               if (!cpu_isset(cpu, data->cpumask))
+                       continue;
+
+               data->csd.func(data->csd.info);
+
+               spin_lock(&data->lock);
+               cpu_clear(cpu, data->cpumask);
+               WARN_ON(data->refs == 0);
+               data->refs--;
+               refs = data->refs;
+               spin_unlock(&data->lock);
+
+               if (refs)
+                       continue;
+
+               spin_lock(&call_function_lock);
+               list_del_rcu(&data->csd.list);
+               spin_unlock(&call_function_lock);
+
+               if (data->csd.flags & CSD_FLAG_WAIT) {
+                       /*
+                        * serialize stores to data with the flag clear
+                        * and wakeup
+                        */
+                       smp_wmb();
+                       data->csd.flags &= ~CSD_FLAG_WAIT;
+               } else
+                       call_rcu(&data->rcu_head, rcu_free_call_data);
+       }
+       rcu_read_unlock();
+
+       put_cpu();
+}
+
+/*
+ * Invoked by arch to handle an IPI for call function single. Must be called
+ * from the arch with interrupts disabled.
+ */
+void generic_smp_call_function_single_interrupt(void)
+{
+       struct call_single_queue *q = &__get_cpu_var(call_single_queue);
+       LIST_HEAD(list);
+
+       /*
+        * We need to see other CPUs' stores to the list head in order to
+        * check whether the list is empty without holding q->lock
+        */
+       smp_mb();
+       while (!list_empty(&q->list)) {
+               unsigned int data_flags;
+
+               spin_lock(&q->lock);
+               list_replace_init(&q->list, &list);
+               spin_unlock(&q->lock);
+
+               while (!list_empty(&list)) {
+                       struct call_single_data *data;
+
+                       data = list_entry(list.next, struct call_single_data,
+                                               list);
+                       list_del(&data->list);
+
+                       /*
+                        * 'data' can be invalid after this call if
+                        * flags == 0 (i.e. when queued through
+                        * generic_exec_single()), so save the flags
+                        * before making the call.
+                        */
+                       data_flags = data->flags;
+
+                       data->func(data->info);
+
+                       if (data_flags & CSD_FLAG_WAIT) {
+                               smp_wmb();
+                               data->flags &= ~CSD_FLAG_WAIT;
+                       } else if (data_flags & CSD_FLAG_ALLOC)
+                               kfree(data);
+               }
+               /*
+                * See comment on outer loop
+                */
+               smp_mb();
+       }
+}
+
+/**
+ * smp_call_function_single - Run a function on a specific CPU
+ * @cpu: The CPU to run @func on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @retry: Unused
+ * @wait: If true, wait until @func has completed on the target CPU.
+ *
+ * Returns 0 on success, else a negative status code. Note that @wait is
+ * implicitly turned on if allocation fails, as we fall back to on-stack data.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+                            int retry, int wait)
+{
+       struct call_single_data d;
+       unsigned long flags;
+       /* prevent preemption and reschedule on another processor */
+       int me = get_cpu();
+
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
+       if (cpu == me) {
+               local_irq_save(flags);
+               func(info);
+               local_irq_restore(flags);
+       } else {
+               struct call_single_data *data = NULL;
+
+               if (!wait) {
+                       data = kmalloc(sizeof(*data), GFP_ATOMIC);
+                       if (data)
+                               data->flags = CSD_FLAG_ALLOC;
+               }
+               if (!data) {
+                       data = &d;
+                       data->flags = CSD_FLAG_WAIT;
+               }
+
+               data->func = func;
+               data->info = info;
+               generic_exec_single(cpu, data);
+       }
+
+       put_cpu();
+       return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
+/**
+ * __smp_call_function_single(): Run a function on another CPU
+ * @cpu: The CPU to run on.
+ * @data: Pre-allocated and set up data structure
+ *
+ * Like smp_call_function_single(), but allows the caller to pass in
+ * a pre-allocated data structure rather than having one allocated on
+ * the fly. Useful for embedding @data inside other structures, for
+ * instance.
+ */
+void __smp_call_function_single(int cpu, struct call_single_data *data)
+{
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
+
+       generic_exec_single(cpu, data);
+}
+
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned. Note that @wait
+ * will be implicitly turned on in case of allocation failures, since
+ * we fall back to on-stack allocation.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
+                          int wait)
+{
+       struct call_function_data d;
+       struct call_function_data *data = NULL;
+       cpumask_t allbutself;
+       unsigned long flags;
+       int cpu, num_cpus;
+
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
+       cpu = smp_processor_id();
+       allbutself = cpu_online_map;
+       cpu_clear(cpu, allbutself);
+       cpus_and(mask, mask, allbutself);
+       num_cpus = cpus_weight(mask);
+
+       /*
+        * If no CPUs remain, return. If just a single CPU remains, turn
+        * this request into a targeted single call instead since it's faster.
+        */
+       if (!num_cpus)
+               return 0;
+       else if (num_cpus == 1) {
+               cpu = first_cpu(mask);
+               return smp_call_function_single(cpu, func, info, 0, wait);
+       }
+
+       if (!wait) {
+               data = kmalloc(sizeof(*data), GFP_ATOMIC);
+               if (data)
+                       data->csd.flags = CSD_FLAG_ALLOC;
+       }
+       if (!data) {
+               data = &d;
+               data->csd.flags = CSD_FLAG_WAIT;
+       }
+
+       spin_lock_init(&data->lock);
+       data->csd.func = func;
+       data->csd.info = info;
+       data->refs = num_cpus;
+       data->cpumask = mask;
+
+       spin_lock_irqsave(&call_function_lock, flags);
+       list_add_tail_rcu(&data->csd.list, &call_function_queue);
+       spin_unlock_irqrestore(&call_function_lock, flags);
+
+       /* Send a message to all CPUs in the map */
+       arch_send_call_function_ipi(mask);
+
+       /* optionally wait for the CPUs to complete */
+       if (wait)
+               csd_flag_wait(&data->csd);
+
+       return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @natomic: Unused
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func. In case of allocation
+ * failure, @wait will be implicitly turned on.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func)(void *), void *info, int natomic, int wait)
+{
+       int ret;
+
+       preempt_disable();
+       ret = smp_call_function_mask(cpu_online_map, func, info, wait);
+       preempt_enable();
+       return ret;
+}
+EXPORT_SYMBOL(smp_call_function);
+
+void ipi_call_lock(void)
+{
+       spin_lock(&call_function_lock);
+}
+
+void ipi_call_unlock(void)
+{
+       spin_unlock(&call_function_lock);
+}
+
+void ipi_call_lock_irq(void)
+{
+       spin_lock_irq(&call_function_lock);
+}
+
+void ipi_call_unlock_irq(void)
+{
+       spin_unlock_irq(&call_function_lock);
+}