x86: unify prefetch operations
author    Glauber de Oliveira Costa <gcosta@redhat.com>
          Wed, 30 Jan 2008 12:31:40 +0000 (13:31 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 30 Jan 2008 12:31:40 +0000 (13:31 +0100)
This patch moves the prefetch[w]? functions to processor.h

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/asm-x86/processor.h
include/asm-x86/processor_32.h
include/asm-x86/processor_64.h

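For context: alternative_input(oldinstr, newinstr, feature, input...) emits oldinstr into the instruction stream and, at boot, patches newinstr over it on CPUs that advertise the given feature bit; the patch below leans on this so a single header can serve every CPU. For experimenting outside the kernel, a minimal user-space analogue, assuming GCC and its __builtin_prefetch builtin (the helper names here are hypothetical, not the kernel's):

    /* rw=0, locality=0 tends to emit prefetchnta on x86; rw=1 asks for a
     * write-intent prefetch (prefetchw where the CPU supports it). */
    static inline void prefetch_ro(const void *x)
    {
            __builtin_prefetch(x, 0, 0);    /* read, non-temporal */
    }

    static inline void prefetch_rw(void *x)
    {
            __builtin_prefetch(x, 1, 3);    /* write intent, keep cached */
    }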
index c6b749a018a786d94225641800b302143cb40779..bfac9739f57e8ce9eddb47cb2f7e4353f51b1fc6 100644 (file)
@@ -596,6 +596,36 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
+#ifdef CONFIG_X86_32
+#define BASE_PREFETCH  ASM_NOP4
+#define ARCH_HAS_PREFETCH
+#else
+#define BASE_PREFETCH  "prefetcht0 (%1)"
+#endif
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth caring about 3dnow! prefetches for the K6
+   because they are microcoded there and very slow.
+   However, we currently don't do prefetches for pre-XP Athlons;
+   that should be fixed. */
+static inline void prefetch(const void *x)
+{
+       alternative_input(BASE_PREFETCH,
+                         "prefetchnta (%1)",
+                         X86_FEATURE_XMM,
+                         "r" (x));
+}
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for
+   spinlocks to avoid one state transition in the cache coherency protocol. */
+static inline void prefetchw(const void *x)
+{
+       alternative_input(BASE_PREFETCH,
+                         "prefetchw (%1)",
+                         X86_FEATURE_3DNOW,
+                         "r" (x));
+}
+
 #define spin_lock_prefetch(x)  prefetchw(x)
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
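For context on spin_lock_prefetch(): the usual pattern is to issue the prefetch a little before the data is touched, so the cache miss overlaps useful work. A hedged sketch using the prefetch() defined above and a hypothetical list type:

    struct node {
            struct node *next;
            long payload;
    };

    static long sum_list(struct node *head)
    {
            long sum = 0;
            struct node *n;

            for (n = head; n; n = n->next) {
                    if (n->next)
                            prefetch(n->next);      /* start the next miss early */
                    sum += n->payload;
            }
            return sum;
    }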
index 84a4c5e47d575e889ebaa3d5b704ce28ab72742d..61a9cae2364b7108053fcfeb8e315e4a3d35174f 100644 (file)
@@ -228,29 +228,4 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define ASM_NOP_MAX 8
 
-/* Prefetch instructions for Pentium III and AMD Athlon */
-/* It's not worth to care about 3dnow! prefetches for the K6
-   because they are microcoded there and very slow.
-   However we don't do prefetches for pre XP Athlons currently
-   That should be fixed. */
-static inline void prefetch(const void *x)
-{
-       alternative_input(ASM_NOP4,
-                         "prefetchnta (%1)",
-                         X86_FEATURE_XMM,
-                         "r" (x));
-}
-
-#define ARCH_HAS_PREFETCH
-
-/* 3dnow! prefetch to get an exclusive cache line. Useful for 
-   spinlocks to avoid one state transition in the cache coherency protocol. */
-static inline void prefetchw(const void *x)
-{
-       alternative_input(ASM_NOP4,
-                         "prefetchw (%1)",
-                         X86_FEATURE_3DNOW,
-                         "r" (x));
-}
-
 #endif /* __ASM_I386_PROCESSOR_H */
index 45e382989b33a9f6e6c6002b5472040ba7caa1c5..08b965124b97bf83dda1e06061f443c2c7b0cff1 100644 (file)
@@ -124,12 +124,4 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
 #define ASM_NOP_MAX 8
 
-static inline void prefetchw(void *x) 
-{ 
-       alternative_input("prefetcht0 (%1)",
-                         "prefetchw (%1)",
-                         X86_FEATURE_3DNOW,
-                         "r" (x));
-} 
-
 #endif /* __ASM_X86_64_PROCESSOR_H */
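Net effect on 64-bit (a sketch of what the unified prefetchw() expands to, not compiler output): every x86-64 CPU has SSE, so BASE_PREFETCH is a real prefetcht0 rather than a NOP, and only the prefetchw instruction still hinges on a feature flag. The function name below is hypothetical:

    static inline void prefetchw_64(const void *x)
    {
            alternative_input("prefetcht0 (%1)",    /* baseline: always valid on x86-64 */
                              "prefetchw (%1)",     /* patched in when 3DNOW! is advertised */
                              X86_FEATURE_3DNOW,
                              "r" (x));
    }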