x86: more header fixes
author Vegard Nossum <vegard.nossum@gmail.com>
Tue, 10 Jun 2008 21:45:45 +0000 (23:45 +0200)
committer Ingo Molnar <mingo@elte.hu>
Wed, 18 Jun 2008 10:27:03 +0000 (12:27 +0200)
Summary: Add missing include guards for some x86 headers.

This has only had the most rudimentary testing, but is hopefully obviously
correct.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/asm-x86/seccomp_64.h
include/asm-x86/suspend_32.h
include/asm-x86/xor_32.h
include/asm-x86/xor_64.h
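
For context, an include guard wraps a header in an #ifndef/#define/#endif triple so that its declarations are compiled at most once per translation unit, no matter how many times the header is pulled in, directly or through other headers. A minimal sketch of the pattern these hunks add, using a hypothetical header and guard name rather than anything from the patch:

/* example.h -- hypothetical header, shown only to illustrate the guard pattern */
#ifndef _EXAMPLE_H
#define _EXAMPLE_H

/*
 * Everything between the guard lines is seen only on the first
 * inclusion; later inclusions find _EXAMPLE_H already defined and
 * skip straight to the #endif.
 */
struct example_state {
	int counter;
};

extern void example_init(struct example_state *state);

#endif /* _EXAMPLE_H */

Note that seccomp_64.h evidently already had the opening #ifndef (and, since no closing hunk appears, presumably the trailing #endif) but no matching #define, so the guard macro was never defined and the header body was reprocessed on every inclusion; the first hunk below only needs to add the missing #define. The other three headers had no guard at all, so both the opening #ifndef/#define pair and the closing #endif are added.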

diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65a2287aa19e9a60f0f2c658e7a5a0d9433..76cfe69aa63c655d969fd80994193697637326bc 100644 (file)
@@ -1,4 +1,5 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/thread_info.h>
 
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c080aa8a82c9f434285b1bce48a67c2af54a..8675c6782a7da2754a4b391bab931b920db9a009 100644 (file)
@@ -3,6 +3,9 @@
  * Based on code
  * Copyright 2001 Patrick Mochel <mochel@osdl.org>
  */
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
 #include <asm/desc.h>
 #include <asm/i387.h>
 
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
 #endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1835a3a93e738fb28420273debfc88a482..921b458404495b2ace1ee3f201f5040e2179a2ee 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do {                                                        \
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST)                   \
        (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e39ac8aff0f0e7192d31c4b01abd1d3415d..2d3a18de295b5aa03e82fe209ae6c667f97abfd6 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do {                                                \
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */