FRV: Add support for emulation of userspace atomic ops [try #2]
author    David Howells <dhowells@redhat.com>
          Thu, 10 Apr 2008 15:10:55 +0000 (16:10 +0100)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 10 Apr 2008 20:41:29 +0000 (13:41 -0700)
Use traps 120-126 to emulate atomic cmpxchg32, xchg32, and XOR-, OR-, AND-, SUB-
and ADD-to-memory operations for userspace.
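
For illustration, a userspace wrapper for the cmpxchg32 trap could look
like the sketch below.  It is not part of the patch: the wrapper name is
hypothetical, and the register convention (operands in gr8-gr10, the old
memory value coming back in gr5 and the replacement in gr9) is inferred
from the handler added to traps.c.

	/* hypothetical userspace veneer for trap 120 (cmpxchg32) */
	static inline unsigned int
	user_cmpxchg32(unsigned int *ptr, unsigned int test, unsigned int new)
	{
		register unsigned int *_p   asm("gr8")  = ptr;
		register unsigned int  _t   asm("gr9")  = test;	/* clobbered */
		register unsigned int  _n   asm("gr10") = new;
		register unsigned int  _old asm("gr5");

		asm volatile("tira gr0,#120"
			     : "=r"(_old), "+r"(_t)
			     : "r"(_p), "r"(_n)
			     : "memory");

		return _old;	/* equals 'test' iff the store happened */
	}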

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/frv/kernel/entry-table.S
arch/frv/kernel/entry.S
arch/frv/kernel/traps.c
include/asm-frv/spr-regs.h

diff --git a/arch/frv/kernel/entry-table.S b/arch/frv/kernel/entry-table.S
index d3b9253d862ae395721a6c129343e8a4e85e7765..bf35f33e48c99d6c18152184ee9b79af34d4d63f 100644
--- a/arch/frv/kernel/entry-table.S
+++ b/arch/frv/kernel/entry-table.S
@@ -316,8 +316,14 @@ __trap_fixup_kernel_data_tlb_miss:
        .section        .trap.vector
        .org            TBR_TT_TRAP0 >> 2
        .long           system_call
-       .rept           126
+       .rept           119
        .long           __entry_unsupported_trap
        .endr
+
+       # userspace atomic op emulation, traps 120-126
+       .rept           7
+       .long           __entry_atomic_op
+       .endr
+
        .org            TBR_TT_BREAK >> 2
        .long           __entry_debug_exception
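
The slot arithmetic is worth spelling out (a sanity sketch, not part of
the patch; TT values per the spr-regs.h hunk below).  Each .long slot is
four bytes and software trap n has TT == (0x80 + n) << 4, so a trap's
slot sits at offset TT >> 2 within .trap.vector:

	/* hypothetical helper: TT value for software trap n */
	#define SW_TRAP_TT(n)	((0x80 + (n)) << 4)

	/* trap 0:   TT 0x800, offset 0x200 -> system_call             */
	/* trap 120: TT 0xf80, offset 0x3e0 -> first __entry_atomic_op */
	/* break:    TT 0xff0, offset 0x3fc -> __entry_debug_exception */

	/* 0x200 + 4 + 119*4 == 0x3e0 and 0x3e0 + 7*4 == 0x3fc, so the
	 * 119 unsupported slots plus the 7 atomic-op slots tile traps
	 * 1-126 exactly, reaching the TBR_TT_BREAK slot with no gap.  */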
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index f36d7f4a7c25eab7458e9d4c19771e23151a3e1c..b8a4b94779b14d137e974f8541729cf133fea099 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -654,6 +654,26 @@ __entry_debug_exception:
        movgs           gr4,psr
        jmpl            @(gr5,gr0)      ; call ill_insn(esfr1,epcr0,esr0)
 
+###############################################################################
+#
+# handle atomic operation emulation for userspace
+#
+###############################################################################
+       .globl          __entry_atomic_op
+__entry_atomic_op:
+       LEDS            0x6012
+       sethi.p         %hi(atomic_operation),gr5
+       setlo           %lo(atomic_operation),gr5
+       movsg           esfr1,gr8
+       movsg           epcr0,gr9
+       movsg           esr0,gr10
+
+       # now that we've accessed the exception regs, we can enable exceptions
+       movsg           psr,gr4
+       ori             gr4,#PSR_ET,gr4
+       movgs           gr4,psr
+       jmpl            @(gr5,gr0)      ; call atomic_operation(esfr1,epcr0,esr0)
+
 ###############################################################################
 #
 # handle media exception
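
In C terms, the veneer above just copies the exception status registers
into the argument registers (the FRV ABI passes the first arguments in
gr8 upwards), re-enables exception handling and tail-calls the function
added to traps.c below:

	asmlinkage void atomic_operation(unsigned long esfr1,
					 unsigned long epcr0,
					 unsigned long esr0);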
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 2e6098c855787de54e82953bb740110be9ed0011..2f7e66877f3bd17c6b078d00f058f9c1959ff081 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -100,6 +100,233 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, un
        force_sig_info(info.si_signo, &info, current);
 } /* end illegal_instruction() */
 
+/*****************************************************************************/
+/*
+ * emulate atomic operations for userspace, handling any faults on the
+ * target memory word
+ * - arguments in gr8, gr9, gr10
+ * - original memory value returned in gr5
+ * - replacement memory value returned in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+                                unsigned long esr0)
+{
+       static DEFINE_SPINLOCK(atomic_op_lock);
+       unsigned long x, y, z, *p;
+       mm_segment_t oldfs;
+       siginfo_t info;
+       int ret;
+
+       y = 0;
+       z = 0;
+
+       oldfs = get_fs();
+       if (!user_mode(__frame))
+               set_fs(KERNEL_DS);
+
+       switch (__frame->tbr & TBR_TT) {
+               /* TIRA gr0,#120
+                * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+                */
+       case TBR_TT_ATOMIC_CMPXCHG32:
+               p = (unsigned long *) __frame->gr8;
+               x = __frame->gr9;
+               y = __frame->gr10;
+
+               for (;;) {
+                       ret = get_user(z, p);
+                       if (ret < 0)
+                               goto error;
+
+                       if (z != x)
+                               goto done;
+
+                       spin_lock_irq(&atomic_op_lock);
+
+                       if (__get_user(z, p) == 0) {
+                               if (z != x)
+                                       goto done2;
+
+                               if (__put_user(y, p) == 0)
+                                       goto done2;
+                               goto error2;
+                       }
+
+                       spin_unlock_irq(&atomic_op_lock);
+               }
+
+               /* TIRA gr0,#121
+                * u32 __atomic_kernel_xchg32(void *v, u32 new)
+                */
+       case TBR_TT_ATOMIC_XCHG32:
+               p = (unsigned long *) __frame->gr8;
+               y = __frame->gr9;
+
+               for (;;) {
+                       ret = get_user(z, p);
+                       if (ret < 0)
+                               goto error;
+
+                       spin_lock_irq(&atomic_op_lock);
+
+                       if (__get_user(z, p) == 0) {
+                               if (__put_user(y, p) == 0)
+                                       goto done2;
+                               goto error2;
+                       }
+
+                       spin_unlock_irq(&atomic_op_lock);
+               }
+
+               /* TIRA gr0,#122
+                * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+                */
+       case TBR_TT_ATOMIC_XOR:
+               p = (unsigned long *) __frame->gr8;
+               x = __frame->gr9;
+
+               for (;;) {
+                       ret = get_user(z, p);
+                       if (ret < 0)
+                               goto error;
+
+                       spin_lock_irq(&atomic_op_lock);
+
+                       if (__get_user(z, p) == 0) {
+                               y = x ^ z;
+                               if (__put_user(y, p) == 0)
+                                       goto done2;
+                               goto error2;
+                       }
+
+                       spin_unlock_irq(&atomic_op_lock);
+               }
+
+               /* TIRA gr0,#123
+                * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+                */
+       case TBR_TT_ATOMIC_OR:
+               p = (unsigned long *) __frame->gr8;
+               x = __frame->gr9;
+
+               for (;;) {
+                       ret = get_user(z, p);
+                       if (ret < 0)
+                               goto error;
+
+                       spin_lock_irq(&atomic_op_lock);
+
+                       if (__get_user(z, p) == 0) {
+                               y = x | z;
+                               if (__put_user(y, p) == 0)
+                                       goto done2;
+                               goto error2;
+                       }
+
+                       spin_unlock_irq(&atomic_op_lock);
+               }
+
+               /* TIRA gr0,#124
+                * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+                */
+       case TBR_TT_ATOMIC_AND:
+               p = (unsigned long *) __frame->gr8;
+               x = __frame->gr9;
+
+               for (;;) {
+                       ret = get_user(z, p);
+                       if (ret < 0)
+                               goto error;
+
+                       spin_lock_irq(&atomic_op_lock);
+
+                       if (__get_user(z, p) == 0) {
+                               y = x & z;
+                               if (__put_user(y, p) == 0)
+                                       goto done2;
+                               goto error2;
+                       }
+
+                       spin_unlock_irq(&atomic_op_lock);
+               }
+
+               /* TIRA gr0,#125
+                * int __atomic_user_sub_return(atomic_t *v, int i)
+                */
+       case TBR_TT_ATOMIC_SUB:
+               p = (unsigned long *) __frame->gr8;
+               x = __frame->gr9;
+
+               for (;;) {
+                       ret = get_user(z, p);
+                       if (ret < 0)
+                               goto error;
+
+                       spin_lock_irq(&atomic_op_lock);
+
+                       if (__get_user(z, p) == 0) {
+                               y = z - x;
+                               if (__put_user(y, p) == 0)
+                                       goto done2;
+                               goto error2;
+                       }
+
+                       spin_unlock_irq(&atomic_op_lock);
+               }
+
+               /* TIRA gr0,#126
+                * int __atomic_user_add_return(atomic_t *v, int i)
+                */
+       case TBR_TT_ATOMIC_ADD:
+               p = (unsigned long *) __frame->gr8;
+               x = __frame->gr9;
+
+               for (;;) {
+                       ret = get_user(z, p);
+                       if (ret < 0)
+                               goto error;
+
+                       spin_lock_irq(&atomic_op_lock);
+
+                       if (__get_user(z, p) == 0) {
+                               y = z + x;
+                               if (__put_user(y, p) == 0)
+                                       goto done2;
+                               goto error2;
+                       }
+
+                       spin_unlock_irq(&atomic_op_lock);
+               }
+
+       default:
+               BUG();
+       }
+
+done2:
+       spin_unlock_irq(&atomic_op_lock);
+done:
+       if (!user_mode(__frame))
+               set_fs(oldfs);
+       __frame->gr5 = z;
+       __frame->gr9 = y;
+       return;
+
+error2:
+       spin_unlock_irq(&atomic_op_lock);
+error:
+       if (!user_mode(__frame))
+               set_fs(oldfs);
+       __frame->pc -= 4;
+
+       die_if_kernel("-- Atomic Op Error --\n");
+
+       info.si_signo   = SIGSEGV;
+       info.si_code    = SEGV_ACCERR;
+       info.si_errno   = 0;
+       info.si_addr    = (void *) __frame->pc;
+
+       force_sig_info(info.si_signo, &info, current);
+}
+
 /*****************************************************************************/
 /*
  *
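
Each arithmetic case in atomic_operation() above follows the same
fault-then-lock shape; distilled (a sketch, not part of the patch --
'op' stands for XOR/OR/AND/SUB/ADD, and the cmpxchg32/xchg32 cases
differ only in how the new value y is chosen):

	for (;;) {
		if (get_user(z, p) < 0)		/* probe: may fault, may sleep */
			goto error;

		spin_lock_irq(&atomic_op_lock);	/* serialise all emulated ops */

		if (__get_user(z, p) == 0) {	/* re-read under the lock */
			y = op(x, z);
			if (__put_user(y, p) == 0)
				goto done2;	/* done: gr5 <- z, gr9 <- y */
			goto error2;		/* not writable: SIGSEGV */
		}

		spin_unlock_irq(&atomic_op_lock); /* page went away: retry */
	}

Probing with get_user() first means any page fault is taken, and may
sleep, without atomic_op_lock held; if the page vanishes again before
the locked re-read, the lock is dropped and the loop retries, while a
__put_user() failure after a successful read means the word is not
writable and the emulation ends in SIGSEGV.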
diff --git a/include/asm-frv/spr-regs.h b/include/asm-frv/spr-regs.h
index c2a541ef828d88c87f73fed40dcd0f9b3e85dc99..01e6af5e99b8b6ed2e195e244dbbc3c033c0e437 100644
--- a/include/asm-frv/spr-regs.h
+++ b/include/asm-frv/spr-regs.h
 #define TBR_TT_TRAP1           (0x81 << 4)
 #define TBR_TT_TRAP2           (0x82 << 4)
 #define TBR_TT_TRAP3           (0x83 << 4)
+#define TBR_TT_TRAP120         (0xf8 << 4)
+#define TBR_TT_TRAP121         (0xf9 << 4)
+#define TBR_TT_TRAP122         (0xfa << 4)
+#define TBR_TT_TRAP123         (0xfb << 4)
+#define TBR_TT_TRAP124         (0xfc << 4)
+#define TBR_TT_TRAP125         (0xfd << 4)
 #define TBR_TT_TRAP126         (0xfe << 4)
 #define TBR_TT_BREAK           (0xff << 4)
 
+#define TBR_TT_ATOMIC_CMPXCHG32        TBR_TT_TRAP120
+#define TBR_TT_ATOMIC_XCHG32   TBR_TT_TRAP121
+#define TBR_TT_ATOMIC_XOR      TBR_TT_TRAP122
+#define TBR_TT_ATOMIC_OR       TBR_TT_TRAP123
+#define TBR_TT_ATOMIC_AND      TBR_TT_TRAP124
+#define TBR_TT_ATOMIC_SUB      TBR_TT_TRAP125
+#define TBR_TT_ATOMIC_ADD      TBR_TT_TRAP126
+
 #define __get_TBR()    ({ unsigned long x; asm volatile("movsg tbr,%0" : "=r"(x)); x; })
 
 /*