[PATCH] sparc32 rwlock fix
author Al Viro <viro@ftp.linux.org.uk>
Sun, 8 Oct 2006 13:32:15 +0000 (14:32 +0100)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sun, 8 Oct 2006 19:32:35 +0000 (12:32 -0700)
read_trylock() is broken on sparc32 (it doesn't build, and didn't work
right even before that).  Proposed fix:

 - make "writer holds lock" distinguishable from "reader tries to grab
   lock"

 - have __raw_read_trylock() try to acquire the mutex (in the LSB of the
   lock), terminating the spin if we see that a writer is holding it.
   Then do the rest as we do in read_lock().

Thanks to Ingo for discussion...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/sparc/kernel/sparc_ksyms.c
arch/sparc/lib/locks.S
include/asm-sparc/spinlock.h
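
The fix hinges on what the 32-bit lock word can look like: the low byte
(byte 3 on big-endian sparc32) is a ldstub mutex taken while the word is
being updated, the reader count lives in the bytes above it, and a writer
that owns the lock now leaves the whole word set to ~0U, so a trylock can
tell "writer holds the lock" apart from "a reader is briefly holding the
byte mutex".  A minimal C sketch of that convention (illustrative only;
the helper names below are made up, and the real code does all of this
from assembly in locks.S):

/* Possible states of the sparc32 rwlock word under this patch:
 *   0x00000000  - lock free
 *   count << 8  - that many readers hold the lock
 *   low byte ff - someone holds the byte mutex while updating the word
 *   0xffffffff  - a writer holds the lock (set by __raw_write_lock/_trylock)
 */
static inline int writer_holds(unsigned int lock)
{
	return lock == ~0U;
}

static inline unsigned int reader_count(unsigned int lock)
{
	return lock >> 8;		/* count sits above the mutex byte */
}

static inline int byte_mutex_held(unsigned int lock)
{
	return (lock & 0xff) != 0;	/* 0xff while a ldstub owner updates the word */
}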

diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 4d441a554d35406da94a99ff9ac20bd3e035986c..33dadd9f28712a14fe964e56933acb3f80486bab 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -87,6 +87,7 @@ extern void ___set_bit(void);
 extern void ___clear_bit(void);
 extern void ___change_bit(void);
 extern void ___rw_read_enter(void);
+extern void ___rw_read_try(void);
 extern void ___rw_read_exit(void);
 extern void ___rw_write_enter(void);
 
@@ -104,8 +105,9 @@ extern unsigned _Urem(unsigned, unsigned);
 EXPORT_SYMBOL(sparc_cpu_model);
 EXPORT_SYMBOL(kernel_thread);
 #ifdef CONFIG_SMP
-// XXX find what uses (or used) these.
+// XXX find what uses (or used) these.   AV: see asm/spinlock.h
 EXPORT_SYMBOL(___rw_read_enter);
+EXPORT_SYMBOL(___rw_read_try);
 EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S
index 95fa48424967b38bf3311fab599136ddf11d7fb2..b1df55cb2215df6c4858681c61129281e62c53d7 100644
--- a/arch/sparc/lib/locks.S
+++ b/arch/sparc/lib/locks.S
@@ -25,6 +25,15 @@ ___rw_read_enter_spin_on_wlock:
         ldstub [%g1 + 3], %g2
        b       ___rw_read_enter_spin_on_wlock
         ldub   [%g1 + 3], %g2
+___rw_read_try_spin_on_wlock:
+       andcc   %g2, 0xff, %g0
+       be,a    ___rw_read_try
+        ldstub [%g1 + 3], %g2
+       xnorcc  %g2, 0x0, %o0   /* if g2 is ~0, set o0 to 0 and bugger off */
+       bne,a   ___rw_read_enter_spin_on_wlock
+        ld     [%g1], %g2
+       retl
+        mov    %g4, %o7
 ___rw_read_exit_spin_on_wlock:
        orcc    %g2, 0x0, %g0
        be,a    ___rw_read_exit
@@ -60,6 +69,17 @@ ___rw_read_exit:
        retl
         mov    %g4, %o7
 
+       .globl  ___rw_read_try
+___rw_read_try:
+       orcc    %g2, 0x0, %g0
+       bne     ___rw_read_try_spin_on_wlock
+        ld     [%g1], %g2
+       add     %g2, 1, %g2
+       st      %g2, [%g1]
+       set     1, %o1
+       retl
+        mov    %g4, %o7
+
        .globl  ___rw_write_enter
 ___rw_write_enter:
        orcc    %g2, 0x0, %g0
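
Rendered as C, the new ___rw_read_try/___rw_read_try_spin_on_wlock pair
behaves roughly like the sketch below.  It is illustrative only: the real
entry point is reached with the lock address in %g1 and the caller's
ldstub result in %g2, and ldstub() here is a stand-in built from a
compiler atomic, not a kernel helper.

/* Stand-in for the sparc ldstub instruction: atomically store 0xff into
 * a byte and return its previous value. */
static inline unsigned char ldstub(volatile unsigned char *p)
{
	return __atomic_exchange_n(p, 0xff, __ATOMIC_ACQUIRE);
}

/* Rough C equivalent of the trylock fast path plus its spin handling. */
static int rw_read_try_sketch(volatile unsigned int *lock)
{
	volatile unsigned char *mutex_byte = (volatile unsigned char *)lock + 3;

	for (;;) {
		if (ldstub(mutex_byte) == 0) {
			/* Got the byte mutex: the word now ends in 0xff, so
			 * adding 1 bumps the reader count in the upper bytes
			 * and releases the byte mutex in a single store. */
			*lock += 1;
			return 1;		/* read lock acquired */
		}
		if (*lock == ~0U)
			return 0;		/* a writer holds the lock: give up */
		/* Only another reader is briefly holding the byte mutex; wait
		 * for it and retry (the assembly falls back to the ordinary
		 * read_lock spin at this point). */
		while (*mutex_byte != 0)
			;
	}
}
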
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h
index 557d08959d2f319ce90b491672c9d4ba518ec471..de2249b267c626bd0066a986afc2d910327a3dce 100644
--- a/include/asm-sparc/spinlock.h
+++ b/include/asm-sparc/spinlock.h
@@ -129,6 +129,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
        : /* no outputs */
        : "r" (lp)
        : "g2", "g4", "memory", "cc");
+       *(volatile __u32 *)&lp->lock = ~0U;
 }
 
 static inline int __raw_write_trylock(raw_rwlock_t *rw)
@@ -144,15 +145,40 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
                val = rw->lock & ~0xff;
                if (val)
                        ((volatile u8*)&rw->lock)[3] = 0;
+               else
+                       *(volatile u32*)&rw->lock = ~0U;
        }
 
        return (val == 0);
 }
 
+static inline int __read_trylock(raw_rwlock_t *rw)
+{
+       register raw_rwlock_t *lp asm("g1");
+       register int res asm("o0");
+       lp = rw;
+       __asm__ __volatile__(
+       "mov    %%o7, %%g4\n\t"
+       "call   ___rw_read_try\n\t"
+       " ldstub        [%%g1 + 3], %%g2\n"
+       : "=r" (res)
+       : "r" (lp)
+       : "g2", "g4", "memory", "cc");
+       return res;
+}
+
+#define __raw_read_trylock(lock) \
+({     unsigned long flags; \
+       int res; \
+       local_irq_save(flags); \
+       res = __read_trylock(lock); \
+       local_irq_restore(flags); \
+       res; \
+})
+
 #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #define _raw_spin_relax(lock)  cpu_relax()
 #define _raw_read_relax(lock)  cpu_relax()
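
For context, this restores the semantics the rest of the kernel expects
from the primitive: read_trylock() must take the read lock and return
nonzero unless a writer holds the lock, in which case it returns 0.  A
typical caller looks like the sketch below (illustrative only, not part
of this patch; the lock and data names are made up):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);	/* hypothetical lock and data */
static unsigned long stats_value;

/* Returns 0 and does nothing if a writer currently holds stats_lock. */
static int try_read_stats(unsigned long *out)
{
	if (!read_trylock(&stats_lock))
		return 0;
	*out = stats_value;		/* read under the lock */
	read_unlock(&stats_lock);
	return 1;
}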