[MIPS] Disallow CpU exception in kernel again.
author     Atsushi Nemoto <anemo@mba.ocn.ne.jp>
           Fri, 13 Apr 2007 17:37:26 +0000 (02:37 +0900)
committer  Ralf Baechle <ralf@linux-mips.org>
           Fri, 20 Apr 2007 13:58:37 +0000 (14:58 +0100)
Commit 4d40bff7110e9e1a97ff8c01bdd6350e9867cc10 ("Allow CpU
exception in kernel partially") was broken: it was meant to fix a
theoretical problem, but it broke the usual case.  Revert it for now.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
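
For orientation, below is a condensed sketch of what the revert restores, assembled from the hunks that follow: the signal paths again bracket the FPU context copy with preempt_disable()/own_fpu()/preempt_enable() instead of enable_fp_in_kernel()/disable_fp_in_kernel(), and do_cpu() once more treats any coprocessor-unusable exception raised in kernel mode as fatal.  The helper names save_fp_to_sigcontext() and restore_fp_from_sigcontext() are illustrative only, not real kernel symbols; their bodies mirror setup_sigcontext() and check_and_restore_fp_context() in arch/mips/kernel/signal.c.

/*
 * Condensed, illustrative sketch only -- not the literal kernel source.
 * The helper names are made up; the bodies mirror the "+" lines in
 * arch/mips/kernel/signal.c and the head of do_cpu() in traps.c below.
 */
static int save_fp_to_sigcontext(struct sigcontext __user *sc)
{
	int err = 0;

	preempt_disable();		/* keep FPU ownership stable across the copy */
	own_fpu(1);			/* claim the FPU, restoring this thread's FP state */
	err |= save_fp_context(sc);	/* copy the live FP registers to the signal frame */
	preempt_enable();

	return err;
}

static int restore_fp_from_sigcontext(struct sigcontext __user *sc)
{
	int err = 0;

	preempt_disable();
	own_fpu(0);			/* claim the FPU without reloading the saved state */
	err |= restore_fp_context(sc);	/* load the FP registers from the signal frame */
	preempt_enable();

	return err;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	/* A coprocessor-unusable exception taken in kernel mode is fatal again. */
	die_if_kernel("do_cpu invoked from kernel context!", regs);

	/* ... user-mode CpU handling as in the traps.c hunk below ... */
}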
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/traps.c
include/asm-mips/fpu.h
include/asm-mips/thread_info.h

diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 28c2e2e6af73e7ff65df2c9d56ada233d601aca0..656bde2e11b14d37186b5d3ec850b5397a78167d 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -49,7 +49,8 @@ LEAF(resume)
 #ifndef CONFIG_CPU_HAS_LLSC
        sw      zero, ll_bit
 #endif
-       mfc0    t2, CP0_STATUS
+       mfc0    t1, CP0_STATUS
+       sw      t1, THREAD_STATUS(a0)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
@@ -59,8 +60,8 @@ LEAF(resume)
        lw      t3, TASK_THREAD_INFO(a0)
        lw      t0, TI_FLAGS(t3)
        li      t1, _TIF_USEDFPU
-       and     t1, t0
-       beqz    t1, 1f
+       and     t2, t0, t1
+       beqz    t2, 1f
        nor     t1, zero, t1
 
        and     t0, t0, t1
@@ -73,13 +74,10 @@ LEAF(resume)
        li      t1, ~ST0_CU1
        and     t0, t0, t1
        sw      t0, ST_OFF(t3)
-       /* clear thread_struct CU1 bit */
-       and     t2, t1
 
        fpu_save_single a0, t0                  # clobbers t0
 
 1:
-       sw      t2, THREAD_STATUS(a0)
        /*
         * The order of restoring the registers takes care of the race
         * updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index c7698fd9955c14b121fed998398a4803970aed3c..cc566cf122464f8672685766dd595bc8cc50cceb 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -48,7 +48,8 @@
 #ifndef CONFIG_CPU_HAS_LLSC
        sw      zero, ll_bit
 #endif
-       mfc0    t2, CP0_STATUS
+       mfc0    t1, CP0_STATUS
+       LONG_S  t1, THREAD_STATUS(a0)
        cpu_save_nonscratch a0
        LONG_S  ra, THREAD_REG31(a0)
 
@@ -58,8 +59,8 @@
        PTR_L   t3, TASK_THREAD_INFO(a0)
        LONG_L  t0, TI_FLAGS(t3)
        li      t1, _TIF_USEDFPU
-       and     t1, t0
-       beqz    t1, 1f
+       and     t2, t0, t1
+       beqz    t2, 1f
        nor     t1, zero, t1
 
        and     t0, t0, t1
        li      t1, ~ST0_CU1
        and     t0, t0, t1
        LONG_S  t0, ST_OFF(t3)
-       /* clear thread_struct CU1 bit */
-       and     t2, t1
 
        fpu_save_double a0 t0 t1                # c0_status passed in t0
                                                # clobbers t1
 1:
-       LONG_S  t2, THREAD_STATUS(a0)
 
        /*
         * The order of restoring the registers takes care of the race
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 8c3c5a5789b0935d9a99334e358663e0e91a69e7..fa581192de214cc5428d7c274f4a28993bd5567f 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -113,10 +113,10 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                 * Save FPU state to signal context. Signal handler
                 * will "inherit" current FPU state.
                 */
+               preempt_disable();
                own_fpu(1);
-               enable_fp_in_kernel();
                err |= save_fp_context(sc);
-               disable_fp_in_kernel();
+               preempt_enable();
        }
        return err;
 }
@@ -148,7 +148,10 @@ check_and_restore_fp_context(struct sigcontext __user *sc)
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
+       preempt_disable();
+       own_fpu(0);
        err |= restore_fp_context(sc);
+       preempt_enable();
        return err ?: sig;
 }
 
@@ -187,11 +190,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 
        if (used_math) {
                /* restore fpu context if we have used it before */
-               own_fpu(0);
-               enable_fp_in_kernel();
                if (!err)
                        err = check_and_restore_fp_context(sc);
-               disable_fp_in_kernel();
        } else {
                /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 151fd2f0893ab7e1c2e2f94f91964d74cdfb1ba7..53a337cfeb66d87c2b2e5dd20eb4f45cdf12b824 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -209,10 +209,10 @@ static int setup_sigcontext32(struct pt_regs *regs,
                 * Save FPU state to signal context.  Signal handler
                 * will "inherit" current FPU state.
                 */
+               preempt_disable();
                own_fpu(1);
-               enable_fp_in_kernel();
                err |= save_fp_context32(sc);
-               disable_fp_in_kernel();
+               preempt_enable();
        }
        return err;
 }
@@ -225,7 +225,10 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc)
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
+       preempt_disable();
+       own_fpu(0);
        err |= restore_fp_context32(sc);
+       preempt_enable();
        return err ?: sig;
 }
 
@@ -261,11 +264,8 @@ static int restore_sigcontext32(struct pt_regs *regs,
 
        if (used_math) {
                /* restore fpu context if we have used it before */
-               own_fpu(0);
-               enable_fp_in_kernel();
                if (!err)
                        err = check_and_restore_fp_context32(sc);
-               disable_fp_in_kernel();
        } else {
                /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7d76a85422b21ca5c250484fda8fc9125d734f5f..6b356f62289e64eaf4bbc144c679c21a098ad7ad 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -757,11 +757,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 {
        unsigned int cpid;
 
+       die_if_kernel("do_cpu invoked from kernel context!", regs);
+
        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
 
        switch (cpid) {
        case 0:
-               die_if_kernel("do_cpu invoked from kernel context!", regs);
                if (!cpu_has_llsc)
                        if (!simulate_llsc(regs))
                                return;
@@ -772,9 +773,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                break;
 
        case 1:
-               if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
-                       die_if_kernel("do_cpu invoked from kernel context!",
-                                     regs);
                if (used_math())        /* Using the FPU again.  */
                        own_fpu(1);
                else {                  /* First time FPU user.  */
@@ -782,19 +780,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
                        set_used_math();
                }
 
-               if (raw_cpu_has_fpu) {
-                       if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
-                               local_irq_disable();
-                               if (cpu_has_fpu)
-                                       regs->cp0_status |= ST0_CU1;
-                               /*
-                                * We must return without enabling
-                                * interrupts to ensure keep FPU
-                                * ownership until resume.
-                                */
-                               return;
-                       }
-               } else {
+               if (!raw_cpu_has_fpu) {
                        int sig;
                        sig = fpu_emulator_cop1Handler(regs,
                                                &current->thread.fpu, 0);
@@ -836,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 
        case 2:
        case 3:
-               die_if_kernel("do_cpu invoked from kernel context!", regs);
                break;
        }
 
diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h
index 4e12d1f9534f9d99d43123c0cb517887946b5443..71436f90203fece8abe35aa6f5962eee567b7a36 100644
--- a/include/asm-mips/fpu.h
+++ b/include/asm-mips/fpu.h
@@ -68,8 +68,6 @@ do {                                                                  \
        /* We don't care about the c0 hazard here  */                   \
 } while (0)
 
-#define __fpu_enabled()        (read_c0_status() & ST0_CU1)
-
 #define enable_fpu()                                                   \
 do {                                                                   \
        if (cpu_has_fpu)                                                \
@@ -162,18 +160,4 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
        return tsk->thread.fpu.fpr;
 }
 
-static inline void enable_fp_in_kernel(void)
-{
-       set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
-       /* make sure CU1 and FPU ownership are consistent */
-       if (!__is_fpu_owner() && __fpu_enabled())
-               __disable_fpu();
-}
-
-static inline void disable_fp_in_kernel(void)
-{
-       BUG_ON(!__is_fpu_owner() && __fpu_enabled());
-       clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
-}
-
 #endif /* _ASM_FPU_H */
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index 6cf05f4a4e7e6add8341c8eaaab3a9e36238170d..fbcda82044738b34674f8630aab3905dc2a7bf44 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -119,7 +119,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #define TIF_POLLING_NRFLAG     17      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE             18
 #define TIF_FREEZE             19
-#define TIF_ALLOW_FP_IN_KERNEL 20
 #define TIF_SYSCALL_TRACE      31      /* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)