pilppa.com Git - linux-2.6-omap-h63xx.git/commitdiff
[PATCH] Define __raw_get_cpu_var and use it
author Paul Mackerras <paulus@samba.org>
Sun, 25 Jun 2006 12:47:14 +0000 (05:47 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sun, 25 Jun 2006 17:01:01 +0000 (10:01 -0700)
There are several instances of per_cpu(foo, raw_smp_processor_id()), which
is semantically equivalent to __get_cpu_var(foo) but without the warning
that smp_processor_id() can give if CONFIG_DEBUG_PREEMPT is enabled.  For
those architectures with optimized per-cpu implementations, namely ia64,
powerpc, s390, sparc64 and x86_64, per_cpu() turns into more and slower
code than __get_cpu_var(), so it would be preferable to use __get_cpu_var
on those platforms.

This defines a __raw_get_cpu_var(x) macro which turns into per_cpu(x,
raw_smp_processor_id()) on architectures that use the generic per-cpu
implementation, and turns into __get_cpu_var(x) on the architectures that
have an optimized per-cpu implementation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/asm-generic/percpu.h
include/asm-ia64/percpu.h
include/asm-powerpc/percpu.h
include/asm-s390/percpu.h
include/asm-sparc64/percpu.h
include/asm-x86_64/percpu.h
kernel/hrtimer.c
kernel/sched.c
kernel/softlockup.c
kernel/timer.c
net/ipv4/route.c

index c0caf433a7d76dc99b8473f4145c250f3446e877..c745211574614bfdc5f8a63c7510bbc4a79e749f 100644 (file)
@@ -14,6 +14,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) per_cpu(var, smp_processor_id())
+#define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)                     \
@@ -30,6 +31,7 @@ do {                                                          \
 
 #define per_cpu(var, cpu)                      (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)                     per_cpu__##var
+#define __raw_get_cpu_var(var)                 per_cpu__##var
 
 #endif /* SMP */
 
index ae357d504fba81ddd0827df421f6ec317c71e6b0..24d898b650c5ddd0142c4747db2ea8ed06c6f61e 100644 (file)
@@ -42,6 +42,7 @@ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);
 
 #define per_cpu(var, cpu)  (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
 
 extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
 extern void setup_per_cpu_areas (void);
@@ -51,6 +52,7 @@ extern void *per_cpu_init(void);
 
 #define per_cpu(var, cpu)                      (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)                     per_cpu__##var
+#define __raw_get_cpu_var(var)                 per_cpu__##var
 #define per_cpu_init()                         (__phys_per_cpu_start)
 
 #endif /* SMP */
index 184a7a4d2fdfe15fb957a55fe828c5945dc2d5a7..faa1fc70305368993778aa8fc3ece4f181b333d2 100644 (file)
@@ -22,6 +22,7 @@
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)                     \
@@ -41,6 +42,7 @@ extern void setup_per_cpu_areas(void);
 
 #define per_cpu(var, cpu)                      (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)                     per_cpu__##var
+#define __raw_get_cpu_var(var)                 per_cpu__##var
 
 #endif /* SMP */
 
index 436d216601e56c10f5a6d5063b1bb8b32a9250c9..d9a8cca9b653ad149003583e140a133fe914cf40 100644 (file)
@@ -40,6 +40,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
     __typeof__(type) per_cpu__##name
 
 #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
+#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
 #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
 
 /* A macro to avoid #include hell... */
@@ -57,6 +58,7 @@ do {                                                          \
     __typeof__(type) per_cpu__##name
 
 #define __get_cpu_var(var) __reloc_hide(var,0)
+#define __raw_get_cpu_var(var) __reloc_hide(var,0)
 #define per_cpu(var,cpu) __reloc_hide(var,0)
 
 #endif /* SMP */
index baef13b589525f7b7b9e21fe4dde913def2e40e4..a6ece06b83dbc8144b7475c89c3423ce9b0b5360 100644 (file)
@@ -21,6 +21,7 @@ register unsigned long __local_per_cpu_offset asm("g5");
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __local_per_cpu_offset))
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)                     \
@@ -37,6 +38,7 @@ do {                                                          \
 
 #define per_cpu(var, cpu)                      (*((void)cpu, &per_cpu__##var))
 #define __get_cpu_var(var)                     per_cpu__##var
+#define __raw_get_cpu_var(var)                 per_cpu__##var
 
 #endif /* SMP */
 
index 7f33aaf9f7b1870c1b58d9de974712c004809182..549eb929b2c0b551952cc9f41aa3e50cbe914f4a 100644 (file)
@@ -21,6 +21,7 @@
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
 #define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
 
 /* A macro to avoid #include hell... */
 #define percpu_modcopy(pcpudst, src, size)                     \
@@ -40,6 +41,7 @@ extern void setup_per_cpu_areas(void);
 
 #define per_cpu(var, cpu)                      (*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)                     per_cpu__##var
+#define __raw_get_cpu_var(var)                 per_cpu__##var
 
 #endif /* SMP */
 
index 18324305724a0b5b6753c1e4dec2219d3e15e9fc..9587aac72f4d80ef655252ec89dcef255613e0bf 100644 (file)
@@ -576,7 +576,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 
        memset(timer, 0, sizeof(struct hrtimer));
 
-       bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+       bases = __raw_get_cpu_var(hrtimer_bases);
 
        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_ABS)
                clock_id = CLOCK_MONOTONIC;
@@ -599,7 +599,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
        struct hrtimer_base *bases;
 
-       bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+       bases = __raw_get_cpu_var(hrtimer_bases);
        *tp = ktime_to_timespec(bases[which_clock].resolution);
 
        return 0;
index 5dbc426944779a33eaf30e2cc476906c4b39983a..f8d540b324caf7da51df895f14c91e12f34363c6 100644 (file)
@@ -4152,7 +4152,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
 
        atomic_inc(&rq->nr_iowait);
        schedule();
@@ -4163,7 +4163,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
+       struct runqueue *rq = &__raw_get_cpu_var(runqueues);
        long ret;
 
        atomic_inc(&rq->nr_iowait);
index 14c7faf02909e8e4879a057406416a43a34b3fdf..2c1be1163edc4647384b829a6bb9e2db5bb41b5a 100644 (file)
@@ -36,7 +36,7 @@ static struct notifier_block panic_block = {
 
 void touch_softlockup_watchdog(void)
 {
-       per_cpu(touch_timestamp, raw_smp_processor_id()) = jiffies;
+       __raw_get_cpu_var(touch_timestamp) = jiffies;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
index f35b3939e9372c5139f0588a9f728b681d225d17..eb97371b87d8fab6cef055b873a3ca4333c8656c 100644 (file)
@@ -146,7 +146,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 void fastcall init_timer(struct timer_list *timer)
 {
        timer->entry.next = NULL;
-       timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+       timer->base = __raw_get_cpu_var(tvec_bases);
 }
 EXPORT_SYMBOL(init_timer);
 
index cc9423de7311fcac8845b38a055b6ea008b22ec7..60b11aece5c3971a321411b9247723b367ea4538 100644 (file)
@@ -244,7 +244,7 @@ static unsigned int         rt_hash_rnd;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) \
-       (per_cpu(rt_cache_stat, raw_smp_processor_id()).field++)
+       (__raw_get_cpu_var(rt_cache_stat).field++)
 
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
                                struct rtable **res);