[PATCH] s390: spinlock corner case
author Heiko Carstens <heiko.carstens@de.ibm.com>
Sat, 3 Sep 2005 22:58:05 +0000 (15:58 -0700)
committer Linus Torvalds <torvalds@evo.osdl.org>
Mon, 5 Sep 2005 07:06:29 +0000 (00:06 -0700)
On s390 the lock value used for spinlocks consists of the lower 32 bits of the
PSW address of the code that holds the lock.  If this address happens to lie on
a four gigabyte boundary, those lower 32 bits are all zero, so the stored lock
value equals the unlocked value 0 and the lock is left looking unlocked.  This
allows other cpus to grab the same lock and enter a lock-protected code path
concurrently.  In theory this can happen if the vmalloc area for the code of a
module crosses a 4 GB boundary.
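
A minimal user-space sketch of the failure mode and the fix (plain C, not the
kernel code being patched; the 4 GB address is hypothetical and a 64-bit
unsigned long is assumed, as on s390x): truncating a 64-bit return address
that is a multiple of 4 GB to the 32-bit lock word yields 0, the unlocked
value, while ORing in 1 first guarantees a nonzero result.

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical return address sitting exactly on a 4 GB
		 * boundary; assumes 64-bit unsigned long (s390x). */
		unsigned long pc = 0x100000000UL;

		/* The 32-bit lock word keeps only the lower 32 bits of pc. */
		unsigned int lock = (unsigned int) pc;		/* 0: looks unlocked */

		/* With the fix, bit 0 is set, so the value is never 0. */
		unsigned int fixed = (unsigned int) (1 | pc);	/* 1: clearly held */

		printf("lock=%#x fixed=%#x\n", lock, fixed);
		return 0;
	}

Setting bit 0 loses no information because s390 instructions are aligned on
halfword (2-byte) boundaries, so the low bit of any valid return address is
already zero.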

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/asm-s390/spinlock.h

index 8ff10300f7ee7afbf69f2ada83d387846aa60187..321b23bba1ecf16987b724e45f79319f55972159 100644
@@ -47,7 +47,7 @@ extern int _raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc);
 
 static inline void _raw_spin_lock(spinlock_t *lp)
 {
-       unsigned long pc = (unsigned long) __builtin_return_address(0);
+       unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 
        if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
                _raw_spin_lock_wait(lp, pc);
@@ -55,7 +55,7 @@ static inline void _raw_spin_lock(spinlock_t *lp)
 
 static inline int _raw_spin_trylock(spinlock_t *lp)
 {
-       unsigned long pc = (unsigned long) __builtin_return_address(0);
+       unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 
        if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
                return 1;
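
For context, the following self-contained, portable sketch mimics the
compare-and-swap locking scheme these hunks patch.  demo_cas, demo_spin_lock,
demo_spin_unlock, and the GCC __sync_val_compare_and_swap builtin are
stand-ins chosen for illustration; the real s390 primitives
(_raw_compare_and_swap, _raw_spin_lock_wait) differ in detail.

	#include <stdio.h>

	typedef struct { volatile unsigned int lock; } demo_spinlock_t;

	/* Stand-in for s390's _raw_compare_and_swap: returns the old value. */
	static inline unsigned int demo_cas(volatile unsigned int *p,
					    unsigned int old, unsigned int newval)
	{
		return __sync_val_compare_and_swap(p, old, newval);
	}

	static inline void demo_spin_lock(demo_spinlock_t *lp)
	{
		unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);

		/* Spin until the 32-bit lock word moves from 0 (free) to pc. */
		while (demo_cas(&lp->lock, 0, (unsigned int) pc) != 0)
			;	/* the real kernel calls _raw_spin_lock_wait() here */
	}

	static inline void demo_spin_unlock(demo_spinlock_t *lp)
	{
		lp->lock = 0;	/* the real kernel uses a serialized store */
	}

	int main(void)
	{
		demo_spinlock_t l = { 0 };

		demo_spin_lock(&l);
		printf("held by pc=%#x\n", l.lock);	/* never 0 while held */
		demo_spin_unlock(&l);
		return 0;
	}

With the 1 | fix, the lock word stays nonzero for the entire time the lock is
held, regardless of where the caller's code lives in the address space.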