include/asm-x86/spinlock.h: checkpatch cleanups - formatting only
author    Joe Perches <joe@perches.com>
Sun, 23 Mar 2008 08:03:31 +0000 (01:03 -0700)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:41:27 +0000 (17:41 +0200)
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
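
The cleanups below match two checkpatch style preferences, both cosmetic: spell the asm keywords as plain "asm volatile" rather than the older "__asm__ __volatile__", and put a space after each colon that introduces an output, input or clobber list. The warnings can be reproduced by running the script directly against the file, e.g. scripts/checkpatch.pl -f include/asm-x86/spinlock.h (the -f switch treats its argument as a source file rather than a patch).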
include/asm-x86/spinlock.h

diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 23804c1890ffcd0780ba4c6171896db99d46a658..47dfe2607bb10465b283ae3550991300ea3359e4 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
        short inc = 0x0100;
 
-       __asm__ __volatile__ (
+       asm volatile (
                LOCK_PREFIX "xaddw %w0, %1\n"
                "1:\t"
                "cmpb %h0, %b0\n\t"
@@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
                /* don't need lfence here, because loads are in-order */
                "jmp 1b\n"
                "2:"
-               :"+Q" (inc), "+m" (lock->slock)
+               : "+Q" (inc), "+m" (lock->slock)
                :
-               :"memory", "cc");
+               : "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
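
For readers who don't speak GNU asm constraint syntax, the locked sequence above is a ticket lock: the 16-bit slock word holds the currently served ticket in its low byte and the next free ticket in its high byte, so xaddw both draws a ticket and returns the previous state in one step, and the loop spins until the owner byte reaches the caller's ticket. A minimal C11 sketch of the same protocol, with illustrative names that are not the kernel's, might look like:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative type, not the kernel's: the union exposes the two
 * bytes of the lock word (little-endian x86 layout assumed). */
typedef union {
    _Atomic uint16_t slock;
    struct {
        _Atomic uint8_t owner;  /* low byte: ticket now being served */
        _Atomic uint8_t next;   /* high byte: next free ticket */
    } b;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
    /* xaddw %w0, %1: bump the high (ticket) byte and fetch the old
     * word in one atomic step; carries out of 16 bits are dropped */
    uint16_t old = atomic_fetch_add(&lock->slock, 0x0100);
    uint8_t my_ticket = (uint8_t)(old >> 8);

    /* cmpb %h0, %b0 ... jmp 1b: spin until the owner byte catches
     * up; "rep ; nop" in the asm is the pause hint for this loop */
    while (atomic_load(&lock->b.owner) != my_ticket)
        ;
}

The byte fields work as long as fewer than 256 CPUs can queue on one lock, which is why the #else branch later in this file widens them when NR_CPUS is 256 or more.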
@@ -104,30 +104,28 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        int tmp;
        short new;
 
-       asm volatile(
-               "movw %2,%w0\n\t"
-               "cmpb %h0,%b0\n\t"
-               "jne 1f\n\t"
-               "movw %w0,%w1\n\t"
-               "incb %h1\n\t"
-               "lock ; cmpxchgw %w1,%2\n\t"
-               "1:"
-               "sete %b1\n\t"
-               "movzbl %b1,%0\n\t"
-               :"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
-               :
-               : "memory", "cc");
+       asm volatile("movw %2,%w0\n\t"
+                    "cmpb %h0,%b0\n\t"
+                    "jne 1f\n\t"
+                    "movw %w0,%w1\n\t"
+                    "incb %h1\n\t"
+                    "lock ; cmpxchgw %w1,%2\n\t"
+                    "1:"
+                    "sete %b1\n\t"
+                    "movzbl %b1,%0\n\t"
+                    : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
 
        return tmp;
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__(
-               UNLOCK_LOCK_PREFIX "incb %0"
-               :"+m" (lock->slock)
-               :
-               :"memory", "cc");
+       asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+                    : "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
 }
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
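
The trylock and unlock halves of the byte-wide variant translate the same way. Continuing the hypothetical ticket_lock_t sketch above: trylock fails fast when the owner and ticket bytes differ, otherwise it claims the lock with a single compare-and-swap, and unlock only has to advance the owner byte.

static int ticket_trylock(ticket_lock_t *lock)
{
    uint16_t old = atomic_load(&lock->slock);

    /* cmpb %h0,%b0 / jne 1f: owner != next ticket means the lock
     * is held, so give up without writing anything */
    if ((uint8_t)old != (uint8_t)(old >> 8))
        return 0;

    /* incb %h1 / lock cmpxchgw: bump the ticket byte, but only if
     * the word is still exactly what we read */
    return atomic_compare_exchange_strong(&lock->slock, &old,
                                          (uint16_t)(old + 0x0100));
}

static void ticket_unlock(ticket_lock_t *lock)
{
    /* incb %0: advance only the owner byte; a word-wide add could
     * carry into the ticket byte when the owner wraps at 0xff */
    atomic_fetch_add(&lock->b.owner, 1);
}

One caveat the sketch glosses over: on a few CPU configurations the kernel's unlock increment needs a lock prefix, which is what the UNLOCK_LOCK_PREFIX macro hides.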
@@ -149,21 +147,20 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
        int inc = 0x00010000;
        int tmp;
 
-       __asm__ __volatile__ (
-               "lock ; xaddl %0, %1\n"
-               "movzwl %w0, %2\n\t"
-               "shrl $16, %0\n\t"
-               "1:\t"
-               "cmpl %0, %2\n\t"
-               "je 2f\n\t"
-               "rep ; nop\n\t"
-               "movzwl %1, %2\n\t"
-               /* don't need lfence here, because loads are in-order */
-               "jmp 1b\n"
-               "2:"
-               :"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
-               :
-               :"memory", "cc");
+       asm volatile("lock ; xaddl %0, %1\n"
+                    "movzwl %w0, %2\n\t"
+                    "shrl $16, %0\n\t"
+                    "1:\t"
+                    "cmpl %0, %2\n\t"
+                    "je 2f\n\t"
+                    "rep ; nop\n\t"
+                    "movzwl %1, %2\n\t"
+                    /* don't need lfence here, because loads are in-order */
+                    "jmp 1b\n"
+                    "2:"
+                    : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+                    :
+                    : "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
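
The NR_CPUS >= 256 variant is the same ticket protocol with 16-bit owner and ticket halves packed into a 32-bit word. It needs the extra "=r" (tmp) operand because x86 can name the low and high bytes of a 16-bit value (%b0, %h0) but has no register alias for the upper half of a 32-bit one, so the owner half is extracted with movzwl and the caller's ticket shifted down with shrl. A matching sketch, again with made-up names:

/* Hypothetical widened type (little-endian layout assumed). */
typedef union {
    _Atomic uint32_t slock;
    struct {
        _Atomic uint16_t owner;  /* low half: ticket being served */
        _Atomic uint16_t next;   /* high half: next free ticket */
    } h;
} ticket_lock32_t;

static void ticket_lock32(ticket_lock32_t *lock)
{
    /* lock xaddl with 0x00010000: draw a ticket from the high half */
    uint32_t old = atomic_fetch_add(&lock->slock, 0x00010000);
    uint16_t my_ticket = (uint16_t)(old >> 16);  /* shrl $16 */

    /* movzwl %1, %2 / cmpl / jmp 1b: spin on the owner half */
    while (atomic_load(&lock->h.owner) != my_ticket)
        ;
}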
@@ -173,31 +170,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        int tmp;
        int new;
 
-       asm volatile(
-               "movl %2,%0\n\t"
-               "movl %0,%1\n\t"
-               "roll $16, %0\n\t"
-               "cmpl %0,%1\n\t"
-               "jne 1f\n\t"
-               "addl $0x00010000, %1\n\t"
-               "lock ; cmpxchgl %1,%2\n\t"
-               "1:"
-               "sete %b1\n\t"
-               "movzbl %b1,%0\n\t"
-               :"=&a" (tmp), "=r" (new), "+m" (lock->slock)
-               :
-               : "memory", "cc");
+       asm volatile("movl %2,%0\n\t"
+                    "movl %0,%1\n\t"
+                    "roll $16, %0\n\t"
+                    "cmpl %0,%1\n\t"
+                    "jne 1f\n\t"
+                    "addl $0x00010000, %1\n\t"
+                    "lock ; cmpxchgl %1,%2\n\t"
+                    "1:"
+                    "sete %b1\n\t"
+                    "movzbl %b1,%0\n\t"
+                    : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
 
        return tmp;
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__(
-               UNLOCK_LOCK_PREFIX "incw %0"
-               :"+m" (lock->slock)
-               :
-               :"memory", "cc");
+       asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+                    : "+m" (lock->slock)
+                    :
+                    : "memory", "cc");
 }
 #endif
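
The 32-bit trylock uses a small trick worth spelling out: roll $16 rotates the word so its two halves swap, and the rotated value equals the original exactly when owner == next, i.e. when the lock is free. Completing the hypothetical sketch:

static int ticket_trylock32(ticket_lock32_t *lock)
{
    uint32_t old = atomic_load(&lock->slock);

    /* roll $16 / cmpl: the word matches its own half-swap iff the
     * owner and ticket halves are equal (lock free) */
    if (((old << 16) | (old >> 16)) != old)
        return 0;

    /* addl $0x00010000 / lock cmpxchgl: claim the next ticket if
     * nothing changed since the read */
    return atomic_compare_exchange_strong(&lock->slock, &old,
                                          old + 0x00010000);
}

static void ticket_unlock32(ticket_lock32_t *lock)
{
    /* incw %0: advance only the 16-bit owner half */
    atomic_fetch_add(&lock->h.owner, 1);
}

As in the byte-wide case, UNLOCK_LOCK_PREFIX covers the few configurations where the unlock increment needs a lock prefix; the sketch simply relies on the C11 atomics' default ordering.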