From: Atsushi Nemoto
Date: Mon, 16 Apr 2007 14:19:44 +0000 (+0900)
Subject: [MIPS] Retry {save,restore}_fp_context if failed in atomic context.
X-Git-Tag: v2.6.21~37^2~2
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=faea62346444ce5b1dba8fb5291d95b676522c42;p=linux-2.6-omap-h63xx.git

[MIPS] Retry {save,restore}_fp_context if failed in atomic context.

save_fp_context()/restore_fp_context() might sleep while accessing the
user stack and can therefore lose FPU ownership partway through.  If
they fail because of the "in_atomic" test in do_page_fault, touch the
sigcontext area from a non-atomic context and retry the save/restore
operation.

This is a replacement for a (broken) fix which was titled "Allow CpU
exception in kernel partially".

Signed-off-by: Atsushi Nemoto
Signed-off-by: Ralf Baechle
---

diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 297dfcb9752..c0faabd5201 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -34,4 +34,13 @@ extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
 /* Check and clear pending FPU exceptions in saved CSR */
 extern int fpcsr_pending(unsigned int __user *fpcsr);
 
+/* Make sure we will not lose FPU ownership */
+#ifdef CONFIG_PREEMPT
+#define lock_fpu_owner()	preempt_disable()
+#define unlock_fpu_owner()	preempt_enable()
+#else
+#define lock_fpu_owner()	pagefault_disable()
+#define unlock_fpu_owner()	pagefault_enable()
+#endif
+
 #endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index fa581192de2..07d67309451 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -27,7 +28,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -78,6 +78,46 @@ struct rt_sigframe {
 /*
  * Helper routines
  */
+static int protected_save_fp_context(struct sigcontext __user *sc)
+{
+	int err;
+	while (1) {
+		lock_fpu_owner();
+		own_fpu_inatomic(1);
+		err = save_fp_context(sc); /* this might fail */
+		unlock_fpu_owner();
+		if (likely(!err))
+			break;
+		/* touch the sigcontext and try again */
+		err = __put_user(0, &sc->sc_fpregs[0]) |
+			__put_user(0, &sc->sc_fpregs[31]) |
+			__put_user(0, &sc->sc_fpc_csr);
+		if (err)
+			break;	/* really bad sigcontext */
+	}
+	return err;
+}
+
+static int protected_restore_fp_context(struct sigcontext __user *sc)
+{
+	int err, tmp;
+	while (1) {
+		lock_fpu_owner();
+		own_fpu_inatomic(0);
+		err = restore_fp_context(sc); /* this might fail */
+		unlock_fpu_owner();
+		if (likely(!err))
+			break;
+		/* touch the sigcontext and try again */
+		err = __get_user(tmp, &sc->sc_fpregs[0]) |
+			__get_user(tmp, &sc->sc_fpregs[31]) |
+			__get_user(tmp, &sc->sc_fpc_csr);
+		if (err)
+			break;	/* really bad sigcontext */
+	}
+	return err;
+}
+
 int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	int err = 0;
@@ -113,10 +153,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		preempt_disable();
-		own_fpu(1);
-		err |= save_fp_context(sc);
-		preempt_enable();
+		err |= protected_save_fp_context(sc);
 	}
 	return err;
 }
@@ -148,10 +185,7 @@ check_and_restore_fp_context(struct sigcontext __user *sc)
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	preempt_disable();
-	own_fpu(0);
-	err |= restore_fp_context(sc);
-	preempt_enable();
+	err |= protected_restore_fp_context(sc);
 	return err ?: sig;
 }
 
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 53a337cfeb6..b9a014411f8 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -29,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -176,6 +176,46 @@ struct rt_sigframe32 {
 /*
  * sigcontext handlers
  */
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
+{
+	int err;
+	while (1) {
+		lock_fpu_owner();
+		own_fpu_inatomic(1);
+		err = save_fp_context32(sc); /* this might fail */
+		unlock_fpu_owner();
+		if (likely(!err))
+			break;
+		/* touch the sigcontext and try again */
+		err = __put_user(0, &sc->sc_fpregs[0]) |
+			__put_user(0, &sc->sc_fpregs[31]) |
+			__put_user(0, &sc->sc_fpc_csr);
+		if (err)
+			break;	/* really bad sigcontext */
+	}
+	return err;
+}
+
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+	int err, tmp;
+	while (1) {
+		lock_fpu_owner();
+		own_fpu_inatomic(0);
+		err = restore_fp_context32(sc); /* this might fail */
+		unlock_fpu_owner();
+		if (likely(!err))
+			break;
+		/* touch the sigcontext and try again */
+		err = __get_user(tmp, &sc->sc_fpregs[0]) |
+			__get_user(tmp, &sc->sc_fpregs[31]) |
+			__get_user(tmp, &sc->sc_fpc_csr);
+		if (err)
+			break;	/* really bad sigcontext */
+	}
+	return err;
+}
+
 static int setup_sigcontext32(struct pt_regs *regs,
 			      struct sigcontext32 __user *sc)
 {
@@ -209,10 +249,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		preempt_disable();
-		own_fpu(1);
-		err |= save_fp_context32(sc);
-		preempt_enable();
+		err |= protected_save_fp_context32(sc);
 	}
 	return err;
 }
@@ -225,10 +262,7 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc)
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	preempt_disable();
-	own_fpu(0);
-	err |= restore_fp_context32(sc);
-	preempt_enable();
+	err |= protected_restore_fp_context32(sc);
 	return err ?: sig;
 }
 
diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h
index 71436f90203..b414a7d9db4 100644
--- a/include/asm-mips/fpu.h
+++ b/include/asm-mips/fpu.h
@@ -100,14 +100,19 @@ static inline void __own_fpu(void)
 	set_thread_flag(TIF_USEDFPU);
 }
 
-static inline void own_fpu(int restore)
+static inline void own_fpu_inatomic(int restore)
 {
-	preempt_disable();
 	if (cpu_has_fpu && !__is_fpu_owner()) {
 		__own_fpu();
 		if (restore)
 			_restore_fp(current);
 	}
+}
+
+static inline void own_fpu(int restore)
+{
+	preempt_disable();
+	own_fpu_inatomic(restore);
 	preempt_enable();
 }
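
For readers following the change, here is the core of the new
protected_save_fp_context() helper again, annotated with editorial comments
describing what each step is for.  The code mirrors the signal.c hunk above;
the comments are not part of the applied patch, and the snippet assumes the
usual MIPS kernel context (it is not standalone-buildable):

	/* Annotated restatement of the retry idiom (comments are editorial). */
	static int protected_save_fp_context(struct sigcontext __user *sc)
	{
		int err;
		while (1) {
			/*
			 * lock_fpu_owner(): with CONFIG_PREEMPT this is
			 * preempt_disable(), otherwise pagefault_disable().
			 * Either way we cannot be scheduled away, so FPU
			 * ownership cannot be lost while we copy.
			 */
			lock_fpu_owner();
			own_fpu_inatomic(1);	/* claim the FPU without sleeping */
			/*
			 * The copy to the user sigcontext may still fault.
			 * Because we count as atomic, do_page_fault() refuses
			 * to service the fault and this returns an error
			 * instead of sleeping.
			 */
			err = save_fp_context(sc);
			unlock_fpu_owner();
			if (likely(!err))
				break;		/* copy went through */
			/*
			 * Now that sleeping is allowed again, touch the
			 * sigcontext so the fault handler can bring the pages
			 * in, then retry the save.
			 */
			err = __put_user(0, &sc->sc_fpregs[0]) |
				__put_user(0, &sc->sc_fpregs[31]) |
				__put_user(0, &sc->sc_fpc_csr);
			if (err)
				break;		/* the sigcontext pointer is really bad */
		}
		return err;
	}

The restore path and the 32-bit compat variants in signal32.c follow exactly
the same pattern, substituting restore_fp_context()/__get_user() and the
*_context32() helpers.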