author    Ingo Molnar <mingo@kernel.org>  2015-04-30 09:29:38 +0200
committer Ingo Molnar <mingo@kernel.org>  2015-05-19 15:48:06 +0200
commit    e1cebad49c54e0241e101ebf63d5238fe1137749 (patch)
tree      8896836c0c576ff4ea58ff11cb509b4e32498751 /arch/x86/kernel/fpu/core.c
parent    acd58a3ad0ed5875fb88bbb078309a3d7ee147a0 (diff)
download  linux-e1cebad49c54e0241e101ebf63d5238fe1137749.tar.gz
          linux-e1cebad49c54e0241e101ebf63d5238fe1137749.tar.bz2
          linux-e1cebad49c54e0241e101ebf63d5238fe1137749.zip
x86/fpu: Factor out the exception error code handling code
Factor out the FPU error code handling code from traps.c and fpu/internal.h
and move them close to each other. Also convert the helper functions to
'struct fpu *', which further simplifies them.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
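For context, the helper introduced below centralizes the SIGFPE si_code selection that the trap handlers previously computed inline. A minimal sketch of the caller side, assuming a traps.c-style handler (the function name and surrounding details are illustrative only, not the actual traps.c code):

/*
 * Illustrative caller sketch: a #MF/#XF trap handler asks
 * fpu__exception_code() which SIGFPE si_code to report, and treats a
 * return value of 0 as a spurious trap that needs no signal.
 */
static void example_math_error(struct pt_regs *regs, int trap_nr)
{
	struct fpu *fpu = &current->thread.fpu;
	struct siginfo info;
	int si_code = fpu__exception_code(fpu, trap_nr);

	if (!si_code)
		return;			/* spurious trap, nothing to report */

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code  = si_code;
	info.si_addr  = (void __user *)uprobe_get_trap_addr(regs);

	force_sig_info(SIGFPE, &info, current);
}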
Diffstat (limited to 'arch/x86/kernel/fpu/core.c')
-rw-r--r--  arch/x86/kernel/fpu/core.c | 88
1 file changed, 88 insertions, 0 deletions
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 07f1cc9d18d9..4809c32149e8 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -8,6 +8,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
+#include <asm/traps.h>
 
 #include <linux/hardirq.h>
 
@@ -749,3 +750,90 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
 EXPORT_SYMBOL(dump_fpu);
 
 #endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+
+/*
+ * x87 math exception handling:
+ */
+
+static inline unsigned short get_fpu_cwd(struct fpu *fpu)
+{
+	if (cpu_has_fxsr) {
+		return fpu->state.fxsave.cwd;
+	} else {
+		return (unsigned short)fpu->state.fsave.cwd;
+	}
+}
+
+static inline unsigned short get_fpu_swd(struct fpu *fpu)
+{
+	if (cpu_has_fxsr) {
+		return fpu->state.fxsave.swd;
+	} else {
+		return (unsigned short)fpu->state.fsave.swd;
+	}
+}
+
+static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
+{
+	if (cpu_has_xmm) {
+		return fpu->state.fxsave.mxcsr;
+	} else {
+		return MXCSR_DEFAULT;
+	}
+}
+
+int fpu__exception_code(struct fpu *fpu, int trap_nr)
+{
+	int err;
+
+	if (trap_nr == X86_TRAP_MF) {
+		unsigned short cwd, swd;
+		/*
+		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
+		 * status. 0x3f is the exception bits in these regs, 0x200 is the
+		 * C1 reg you need in case of a stack fault, 0x040 is the stack
+		 * fault bit. We should only be taking one exception at a time,
+		 * so if this combination doesn't produce any single exception,
+		 * then we have a bad program that isn't synchronizing its FPU usage
+		 * and it will suffer the consequences since we won't be able to
+		 * fully reproduce the context of the exception
+		 */
+		cwd = get_fpu_cwd(fpu);
+		swd = get_fpu_swd(fpu);
+
+		err = swd & ~cwd;
+	} else {
+		/*
+		 * The SIMD FPU exceptions are handled a little differently, as there
+		 * is only a single status/control register. Thus, to determine which
+		 * unmasked exception was caught we must mask the exception mask bits
+		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+		 */
+		unsigned short mxcsr = get_fpu_mxcsr(fpu);
+		err = ~(mxcsr >> 7) & mxcsr;
+	}
+
+	if (err & 0x001) {	/* Invalid op */
+		/*
+		 * swd & 0x240 == 0x040: Stack Underflow
+		 * swd & 0x240 == 0x240: Stack Overflow
+		 * User must clear the SF bit (0x40) if set
+		 */
+		return FPE_FLTINV;
+	} else if (err & 0x004) { /* Divide by Zero */
+		return FPE_FLTDIV;
+	} else if (err & 0x008) { /* Overflow */
+		return FPE_FLTOVF;
+	} else if (err & 0x012) { /* Denormal, Underflow */
+		return FPE_FLTUND;
+	} else if (err & 0x020) { /* Precision */
+		return FPE_FLTRES;
+	}
+
+	/*
+	 * If we're using IRQ 13, or supposedly even some trap
+	 * X86_TRAP_MF implementations, it's possible
+	 * we get a spurious trap, which is not an error.
+	 */
+	return 0;
+}
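
To make the masking logic described in the comments above concrete, here is a small stand-alone user-space sketch (sample register values chosen purely for illustration; this is not kernel code) that applies the same (~cwd & swd) and ~(mxcsr >> 7) & mxcsr arithmetic and maps the result to the same FPE_* choices as fpu__exception_code():

#include <stdio.h>

/* Exception flag bits shared by the x87 status word and MXCSR (low 6 bits). */
#define FP_IE 0x001	/* Invalid Operation */
#define FP_DE 0x002	/* Denormal */
#define FP_ZE 0x004	/* Divide-by-Zero */
#define FP_OE 0x008	/* Overflow */
#define FP_UE 0x010	/* Underflow */
#define FP_PE 0x020	/* Precision */

static const char *name_exception(int err)
{
	/* Same priority order and bit tests as fpu__exception_code(). */
	if (err & 0x001)
		return "FPE_FLTINV";
	if (err & 0x004)
		return "FPE_FLTDIV";
	if (err & 0x008)
		return "FPE_FLTOVF";
	if (err & 0x012)
		return "FPE_FLTUND";
	if (err & 0x020)
		return "FPE_FLTRES";
	return "spurious (0)";
}

int main(void)
{
	/*
	 * x87 case: start from the default control word 0x037f (everything
	 * masked), clear the divide-by-zero mask bit, and let the status
	 * word report a divide-by-zero plus a (still masked) precision event.
	 */
	unsigned short cwd = 0x037f & ~FP_ZE;
	unsigned short swd = FP_ZE | FP_PE;
	int err = swd & ~cwd;
	printf("x87:  err=%#x -> %s\n", err, name_exception(err));

	/*
	 * SIMD case: MXCSR keeps the mask bits at 0x1f80, seven bits above
	 * the flag bits at 0x3f, so shifting right by 7 lines them up.
	 * Unmask overflow, then flag overflow and precision.
	 */
	unsigned short mxcsr = 0x1f80 & ~(FP_OE << 7);
	mxcsr |= FP_OE | FP_PE;
	err = ~(mxcsr >> 7) & mxcsr;
	printf("simd: err=%#x -> %s\n", err, name_exception(err));

	return 0;
}

Compiled and run, the first case reports FPE_FLTDIV (divide-by-zero unmasked and flagged in the x87 status word) and the second FPE_FLTOVF (overflow unmasked and flagged in MXCSR); masked events such as the precision flag drop out of err, just as in the kernel helper.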