use the new percpu interface for shared data
author    Fenghua Yu <fenghua.yu@intel.com>
          Thu, 19 Jul 2007 08:48:13 +0000 (01:48 -0700)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
          Thu, 19 Jul 2007 17:04:45 +0000 (10:04 -0700)
Currently, most per-cpu data that is accessed by other cpus carries a
____cacheline_aligned_in_smp attribute.  Move all of this data to the new
per-cpu shared data section: .data.percpu.shared_aligned.

This separates the per-cpu data that is frequently referenced by other
cpus from the per-cpu data that is only accessed locally.
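
For reference, a minimal sketch of how the new macro can be implemented
(assumed for illustration; the authoritative definition lives in
include/linux/percpu.h and the per-arch linker scripts, and may differ
in detail):

    /*
     * Sketch: on SMP, place the variable into the dedicated
     * .data.percpu.shared_aligned section and pad it out to a full
     * cacheline, so that writes from other cpus do not bounce the
     * cachelines of neighbouring per-cpu variables.  On UP there is
     * no cross-cpu traffic, so a plain DEFINE_PER_CPU is enough.
     */
    #ifdef CONFIG_SMP
    #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                   \
            __attribute__((__section__(".data.percpu.shared_aligned"))) \
            __typeof__(type) per_cpu__##name                            \
            ____cacheline_aligned_in_smp
    #else
    #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                   \
            DEFINE_PER_CPU(type, name)
    #endif

The architecture linker scripts are expected to place
.data.percpu.shared_aligned next to .data.percpu inside the per-cpu
area, so only the data layout changes; the percpu accessors are
unaffected.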

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/i386/kernel/init_task.c
arch/i386/kernel/irq.c
arch/ia64/kernel/smp.c
arch/x86_64/kernel/init_task.c
kernel/sched.c

diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c
index cff95d10a4d8251173ee8ceac7351d774279cbbb..d26fc063a760c508dd0060b784a6d070ff0bf4dc 100644
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */ 
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index d2daf672f4a2375926c52f43f840c16a246b7e89..ba44d40b066d0588f07f9dca68b84f4d197615c3 100644
@@ -21,7 +21,7 @@
 #include <asm/apic.h>
 #include <asm/uaccess.h>
 
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b3a47f986e1e69e99e5cc980cdff3af3418a368a..9f72838db26ec52c8c0fce68f31e7f6c8b032dbb 100644
@@ -82,7 +82,7 @@ static volatile struct call_data_struct *call_data;
 #define IPI_KDUMP_CPU_STOP     3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index 3dc5854ba21e2af10ca82d50cda1e6f38e13daf7..4ff33d4f855165d638f5232ee8c4d953dd540e15 100644
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */ 
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 
 /* Copies of the original ist values from the tss are only accessed during
  * debugging, no special alignment required.
diff --git a/kernel/sched.c b/kernel/sched.c
index cb31fb4a1379e23b0628795d5e1bfd64c6031bf0..645256b228c31725ae10f528b206b936e6c12782 100644
@@ -301,7 +301,7 @@ struct rq {
        struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)