softirq: Add support for triggering softirq work on remote cpus.
author    David S. Miller <davem@davemloft.net>
          Wed, 24 Sep 2008 05:15:57 +0000 (22:15 -0700)
committer Jens Axboe <jens.axboe@oracle.com>
          Fri, 17 Oct 2008 06:46:56 +0000 (08:46 +0200)
This is basically a genericization of Jens Axboe's block layer
remote softirq changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
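
To make the intended use concrete, here is a rough producer-side sketch (not
part of this commit): a driver that wants its completion work to run on the
CPU that submitted a request embeds a call_single_data in the request and
hands it to the new helper. 'struct my_request', 'MY_SOFTIRQ' and
'my_complete' are hypothetical names.

    /* Hypothetical producer sketch -- none of these names exist in the
     * tree.  The request embeds the call_single_data that
     * send_remote_softirq() queues via its 'list' member.
     */
    struct my_request {
            struct call_single_data csd;    /* queueing handle */
            int submit_cpu;                 /* recorded at submit time */
    };

    static void my_complete(struct my_request *rq)
    {
            /* Queue rq->csd on submit_cpu's worklist and raise
             * MY_SOFTIRQ there; if submit_cpu is offline or is the
             * current cpu, the work runs on the local cpu instead.
             */
            send_remote_softirq(&rq->csd, rq->submit_cpu, MY_SOFTIRQ);
    }
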
include/linux/interrupt.h
include/linux/smp.h
kernel/softirq.c

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 54b3623434eca45d1f3758a787d4763aed069b2d..35a61dc60d51ac9878ea9ba6fad94b579403f652 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,6 +11,8 @@
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -273,6 +275,25 @@ extern void softirq_init(void);
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them.  The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu.  If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+                                 int this_cpu, int softirq);
 
 /* Tasklets --- multithreaded analogue of BHs.
 
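Note that this commit adds only the queueing side; each softirq that uses the
facility must drain its own per-cpu softirq_work_list from its action. A
minimal consumer sketch, under the same hypothetical names as above
('MY_SOFTIRQ', 'struct my_request'; 'my_finish_request' is likewise
illustrative). Because the splice leaves the worklist empty, __local_trigger()'s
"raise only when previously empty" test re-arms naturally:

    static void my_softirq_action(struct softirq_action *h)
    {
            LIST_HEAD(local_list);

            /* The per-cpu worklist may only be touched with irqs off. */
            local_irq_disable();
            list_splice_init(&__get_cpu_var(softirq_work_list[MY_SOFTIRQ]),
                             &local_list);
            local_irq_enable();

            while (!list_empty(&local_list)) {
                    struct my_request *rq = list_entry(local_list.next,
                                    struct my_request, csd.list);

                    list_del_init(&rq->csd.list);
                    my_finish_request(rq);  /* hypothetical completion */
            }
    }
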
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 66484d4a845944d8e7250f0cd6932febe2c8933b..2e4d58b26c06a451bf53fdec47eb6ea445739934 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,6 +7,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/types.h>
 #include <linux/list.h>
 #include <linux/cpumask.h>
 
@@ -16,7 +17,8 @@ struct call_single_data {
        struct list_head list;
        void (*func) (void *info);
        void *info;
-       unsigned int flags;
+       u16 flags;
+       u16 priv;
 };
 
 #ifdef CONFIG_SMP
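
Narrowing 'flags' to 16 bits frees a 16-bit 'priv' slot without growing
call_single_data; the softirq code below uses it to carry the softirq number
across the IPI (__try_remote_softirq() stores it, remote_softirq_receive()
reads it back). An illustrative compile-time guard for that width assumption,
not in the commit (BUILD_BUG_ON() is the kernel's static assertion):

    /* Illustrative only: document that the softirq number always fits
     * in the new 16-bit priv field.
     */
    static inline void check_softirq_fits_in_priv(void)
    {
            BUILD_BUG_ON(NR_SOFTIRQS > 0xffff);
    }
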
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 37d67aa2d56fb18cb189f13e6a727ae3570c546e..83ba21a13bd470cea2815d6792e8ff24af43e727 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
  *     Distribute under GPLv2.
  *
  *     Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ *     Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -474,17 +476,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+       struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+       list_add_tail(&cp->list, head);
+
+       /* Trigger the softirq only if the list was previously empty.  */
+       if (head->next == &cp->list)
+               raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+       struct call_single_data *cp = data;
+       unsigned long flags;
+       int softirq;
+
+       softirq = cp->priv;
+
+       local_irq_save(flags);
+       __local_trigger(cp, softirq);
+       local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+       if (cpu_online(cpu)) {
+               cp->func = remote_softirq_receive;
+               cp->info = cp;
+               cp->flags = 0;
+               cp->priv = softirq;
+
+               __smp_call_function_single(cpu, cp);
+               return 0;
+       }
+       return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+       return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu.  If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+       if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+               __local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq(), except that disabling interrupts and
+ * computing the current cpu are done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+       unsigned long flags;
+       int this_cpu;
+
+       local_irq_save(flags);
+       this_cpu = smp_processor_id();
+       __send_remote_softirq(cp, cpu, this_cpu, softirq);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+                                              unsigned long action, void *hcpu)
+{
+       /*
+        * If a CPU goes away, splice its entries to the current CPU
+        * and trigger a run of the softirq
+        */
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+               int cpu = (unsigned long) hcpu;
+               int i;
+
+               local_irq_disable();
+               for (i = 0; i < NR_SOFTIRQS; i++) {
+                       struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+                       struct list_head *local_head;
+
+                       if (list_empty(head))
+                               continue;
+
+                       local_head = &__get_cpu_var(softirq_work_list[i]);
+                       list_splice_init(head, local_head);
+                       raise_softirq_irqoff(i);
+               }
+               local_irq_enable();
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+       .notifier_call  = remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
        int cpu;
 
        for_each_possible_cpu(cpu) {
+               int i;
+
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
+               for (i = 0; i < NR_SOFTIRQS; i++)
+                       INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }
 
+       register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
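
End to end, a subsystem would register its action at init time; the hotplug
notifier above then guarantees that work queued on a CPU that dies is spliced
onto the CPU handling CPU_DEAD and the softirq re-raised there, so queued
entries are never lost. A hypothetical registration sketch (a real MY_SOFTIRQ
would also need an entry before NR_SOFTIRQS in the enum in
include/linux/interrupt.h):

    static int __init my_subsys_init(void)
    {
            open_softirq(MY_SOFTIRQ, my_softirq_action);
            return 0;
    }
    subsys_initcall(my_subsys_init);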