pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
x86: integrate start_secondary
author Glauber de Oliveira Costa <gcosta@redhat.com>
Wed, 19 Mar 2008 17:26:00 +0000 (14:26 -0300)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:41:03 +0000 (17:41 +0200)
It now looks the same on both architectures, so we
merge it into smpboot.c. Minor differences go inside
an ifdef.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
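
The shape of the merge, visible in the first hunk below, is a single
common start_secondary() body, with the 64-bit-only vector setup kept
behind #ifdef CONFIG_X86_64 and the 32-bit-only initialize_secondary()
behind #ifdef CONFIG_X86_32. A minimal stand-alone sketch of that
folding pattern, using placeholder functions rather than the real
kernel APIs:

#include <stdio.h>

/* Placeholders standing in for the shared bring-up steps. */
static void common_bringup(void)      { puts("cpu_init / smp_callin / ..."); }
static void arch64_vector_setup(void) { puts("__setup_vector_irq under vector_lock"); }
static void mark_online(void)         { puts("cpu_set(cpu, cpu_online_map)"); }

/*
 * One body serves both configurations; the 64-bit-only step is
 * compiled in or out instead of being duplicated in two files.
 */
static void start_secondary_sketch(void)
{
	common_bringup();
#ifdef CONFIG_X86_64
	arch64_vector_setup();
#endif
	mark_online();
}

int main(void)
{
	/* build with -DCONFIG_X86_64 to take the 64-bit-only path */
	start_secondary_sketch();
	return 0;
}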
arch/x86/kernel/smpboot.c
arch/x86/kernel/smpboot_32.c
arch/x86/kernel/smpboot_64.c

index 69c17965f48da5ba510a4f62d4820bbe7e12f7d5..a36ae2785c4872bf45b901c2fa0eaba7d514869d 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/nmi.h>
+#include <asm/vmi.h>
 #include <linux/mc146818rtc.h>
 
 #include <mach_apic.h>
@@ -229,6 +230,90 @@ void __cpuinit smp_callin(void)
        cpu_set(cpuid, cpu_callin_map);
 }
 
+/*
+ * Activate a secondary processor.
+ */
+void __cpuinit start_secondary(void *unused)
+{
+       /*
+        * Don't put *anything* before cpu_init(); SMP booting is so
+        * fragile that we want to limit the things done here to the
+        * bare minimum.
+        */
+#ifdef CONFIG_VMI
+       vmi_bringup();
+#endif
+       cpu_init();
+       preempt_disable();
+       smp_callin();
+
+       /* otherwise gcc will move smp_processor_id() up before cpu_init() */
+       barrier();
+       /*
+        * Check TSC synchronization with the BP:
+        */
+       check_tsc_sync_target();
+
+       if (nmi_watchdog == NMI_IO_APIC) {
+               disable_8259A_irq(0);
+               enable_NMI_through_LVT0();
+               enable_8259A_irq(0);
+       }
+
+       /* This must be done before setting cpu_online_map */
+       set_cpu_sibling_map(raw_smp_processor_id());
+       wmb();
+
+       /*
+        * We need to hold call_lock, so there is no inconsistency
+        * between the time smp_call_function() determines the number
+        * of IPI recipients and the time the IPIs are actually sent:
+        * the set of target cpus must not change in between. Holding
+        * this lock keeps this cpu from being added to a currently
+        * in-progress smp_call_function().
+        */
+       lock_ipi_call_lock();
+#ifdef CONFIG_X86_64
+       spin_lock(&vector_lock);
+
+       /* Setup the per cpu irq handling data structures */
+       __setup_vector_irq(smp_processor_id());
+       /*
+        * Allow the master to continue.
+        */
+       spin_unlock(&vector_lock);
+#endif
+       cpu_set(smp_processor_id(), cpu_online_map);
+       unlock_ipi_call_lock();
+       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+
+       setup_secondary_clock();
+
+       wmb();
+       cpu_idle();
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * Everything has been set up for the secondary
+ * CPUs - they just need to reload everything
+ * from the task structure.
+ * This function must not return.
+ */
+void __devinit initialize_secondary(void)
+{
+       /*
+        * We don't actually need to load the full TSS,
+        * basically just the stack pointer and the ip.
+        */
+
+       asm volatile(
+               "movl %0,%%esp\n\t"
+               "jmp *%1"
+               :
+               :"m" (current->thread.sp), "m" (current->thread.ip));
+}
+#endif
 
 static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
 {
@@ -533,7 +618,6 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 }
 #endif /* WAKE_SECONDARY_VIA_NMI */
 
-extern void start_secondary(void *unused);
 #ifdef WAKE_SECONDARY_VIA_INIT
 static int __devinit
 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
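
The initialize_secondary() helper kept above for 32-bit reloads the
stack pointer and instruction pointer from the task structure via gcc
extended asm with "m" (memory) input operands. As a stand-alone
illustration of that operand syntax only (a toy user-space example;
switching stacks the way the kernel does is not reproducible outside
it):

#include <stdio.h>

int main(void)
{
	int a = 2, b = 3, sum;

	/*
	 * Same extended-asm shape as initialize_secondary(): "m"
	 * constraints let the instructions address the operands
	 * directly in memory, with no register chosen by gcc.
	 */
	asm("movl %1, %0\n\t"
	    "addl %2, %0"
	    : "=r" (sum)
	    : "m" (a), "m" (b));

	printf("sum = %d\n", sum);	/* prints sum = 5 */
	return 0;
}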
index e82eeb2fdfefde6b33cfc4dec68ac2b3353868b6..77b045cfebd450c02388a8c044a219a2a8c765ee 100644 (file)
@@ -80,81 +80,6 @@ extern void unmap_cpu_to_logical_apicid(int cpu);
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-extern void smp_callin(void);
-
-/*
- * Activate a secondary processor.
- */
-void __cpuinit start_secondary(void *unused)
-{
-       /*
-        * Don't put *anything* before cpu_init(), SMP booting is too
-        * fragile that we want to limit the things done here to the
-        * most necessary things.
-        */
-#ifdef CONFIG_VMI
-       vmi_bringup();
-#endif
-       cpu_init();
-       preempt_disable();
-       smp_callin();
-
-       /* otherwise gcc will move up smp_processor_id before the cpu_init */
-       barrier();
-       /*
-        * Check TSC synchronization with the BP:
-        */
-       check_tsc_sync_target();
-
-       if (nmi_watchdog == NMI_IO_APIC) {
-               disable_8259A_irq(0);
-               enable_NMI_through_LVT0();
-               enable_8259A_irq(0);
-       }
-
-       /* This must be done before setting cpu_online_map */
-       set_cpu_sibling_map(raw_smp_processor_id());
-       wmb();
-
-       /*
-        * We need to hold call_lock, so there is no inconsistency
-        * between the time smp_call_function() determines number of
-        * IPI recipients, and the time when the determination is made
-        * for which cpus receive the IPI. Holding this
-        * lock helps us to not include this cpu in a currently in progress
-        * smp_call_function().
-        */
-       lock_ipi_call_lock();
-       cpu_set(smp_processor_id(), cpu_online_map);
-       unlock_ipi_call_lock();
-       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-
-       setup_secondary_clock();
-
-       wmb();
-       cpu_idle();
-}
-
-/*
- * Everything has been set up for the secondary
- * CPUs - they just need to reload everything
- * from the task structure
- * This function must not return.
- */
-void __devinit initialize_secondary(void)
-{
-       /*
-        * We don't actually need to load the full TSS,
-        * basically just the stack pointer and the ip.
-        */
-
-       asm volatile(
-               "movl %0,%%esp\n\t"
-               "jmp *%1"
-               :
-               :"m" (current->thread.sp),"m" (current->thread.ip));
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 void cpu_exit_clear(void)
 {
index 71f13b15bd89f2617066a949c3a7189a828c2a8c..60cd8cf1b07303b42f1667d4cb2c65794bf5da88 100644 (file)
@@ -71,69 +71,6 @@ int smp_threads_ready;
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-extern void smp_callin(void);
-/*
- * Setup code on secondary processor (after coming out of the trampoline)
- */
-void __cpuinit start_secondary(void)
-{
-       /*
-        * Don't put anything before smp_callin(), SMP
-        * booting is too fragile that we want to limit the
-        * things done here to the most necessary things.
-        */
-       cpu_init();
-       preempt_disable();
-       smp_callin();
-
-       /* otherwise gcc will move up the smp_processor_id before the cpu_init */
-       barrier();
-
-       /*
-        * Check TSC sync first:
-        */
-       check_tsc_sync_target();
-
-       if (nmi_watchdog == NMI_IO_APIC) {
-               disable_8259A_irq(0);
-               enable_NMI_through_LVT0();
-               enable_8259A_irq(0);
-       }
-
-       /*
-        * The sibling maps must be set before turning the online map on for
-        * this cpu
-        */
-       set_cpu_sibling_map(smp_processor_id());
-
-       /*
-        * We need to hold call_lock, so there is no inconsistency
-        * between the time smp_call_function() determines number of
-        * IPI recipients, and the time when the determination is made
-        * for which cpus receive the IPI in genapic_flat.c. Holding this
-        * lock helps us to not include this cpu in a currently in progress
-        * smp_call_function().
-        */
-       lock_ipi_call_lock();
-       spin_lock(&vector_lock);
-
-       /* Setup the per cpu irq handling data structures */
-       __setup_vector_irq(smp_processor_id());
-       /*
-        * Allow the master to continue.
-        */
-       spin_unlock(&vector_lock);
-       cpu_set(smp_processor_id(), cpu_online_map);
-       unlock_ipi_call_lock();
-
-       per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-
-       setup_secondary_clock();
-
-       wmb();
-       cpu_idle();
-}
-
 cycles_t cacheflush_time;
 unsigned long cache_decay_ticks;
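
For reference, the call_lock comment repeated in all three copies above
guards against a membership race: smp_call_function() first determines
its IPI recipients and then delivers to them, so a cpu may only become
visible as online under the same lock. A user-space analogue of that
rule, sketched with pthreads and invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
static int online[8];
static int nr_online;

/* Sender: count the recipients and deliver under one lock, so the
 * set cannot change between the count and the delivery. */
static void call_function_on_all(void)
{
	pthread_mutex_lock(&call_lock);
	int recipients = nr_online;		/* determine recipients */
	for (int i = 0; i < recipients; i++)
		printf("IPI to cpu %d\n", online[i]);	/* deliver */
	pthread_mutex_unlock(&call_lock);
}

/* Newcomer: joining under call_lock means it is either fully counted
 * and delivered to, or not visible at all, never half-joined. */
static void bring_cpu_online(int cpu)
{
	pthread_mutex_lock(&call_lock);
	online[nr_online++] = cpu;
	pthread_mutex_unlock(&call_lock);
}

int main(void)
{
	bring_cpu_online(0);
	bring_cpu_online(1);
	call_function_on_all();
	return 0;
}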