Diffstat (limited to 'arch/x86/kernel/i387.c')
-rw-r--r--  arch/x86/kernel/i387.c | 559
1 file changed, 360 insertions(+), 199 deletions(-)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index d2e39e69aaf..d5dd8081441 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -5,116 +5,257 @@
* General FPU state handling cleanups
* Gareth Hughes <gareth@valinux.com>, May 2000
*/
-
-#include <linux/sched.h>
#include <linux/module.h>
#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/sigcontext.h>
#include <asm/processor.h>
-#include <asm/i387.h>
#include <asm/math_emu.h>
-#include <asm/sigcontext.h>
-#include <asm/user.h>
-#include <asm/ptrace.h>
#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
+#include <asm/user.h>
-#ifdef CONFIG_X86_64
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have the FPU (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ *
+ * Except for the eagerfpu case: there we return true unless the
+ * state has already been saved by an outer kernel_fpu_begin().
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+ if (use_eager_fpu())
+ return __thread_has_fpu(current);
-#include <asm/sigcontext32.h>
-#include <asm/user32.h>
+ return !__thread_has_fpu(current) &&
+ (read_cr0() & X86_CR0_TS);
+}
-#else
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+ struct pt_regs *regs = get_irq_regs();
+ return regs && user_mode_vm(regs);
+}
-#define save_i387_ia32 save_i387
-#define restore_i387_ia32 restore_i387
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+bool irq_fpu_usable(void)
+{
+ return !in_interrupt() ||
+ interrupted_user_mode() ||
+ interrupted_kernel_fpu_idle();
+}
+EXPORT_SYMBOL(irq_fpu_usable);
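
A usage sketch of the pattern these helpers enable (the kernel_fpu_begin()/kernel_fpu_end() wrappers are real and also handle preemption; the xor helper names here are hypothetical):

/* Sketch: using SIMD in kernel code, with a scalar fallback. */
#include <asm/i387.h>	/* irq_fpu_usable(), kernel_fpu_begin/end() */

static void xor_block(void *dst, const void *src, unsigned long len)
{
	if (!irq_fpu_usable()) {
		xor_block_scalar(dst, src, len);	/* hypothetical fallback */
		return;
	}

	kernel_fpu_begin();	/* may save the user's FPU state, clears TS */
	/* ... SSE/AVX loop; must not sleep before kernel_fpu_end() ... */
	kernel_fpu_end();	/* restores eager state or sets TS again */
}
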
-#define _fpstate_ia32 _fpstate
-#define user_i387_ia32_struct user_i387_struct
-#define user32_fxsr_struct user_fxsr_struct
+void __kernel_fpu_begin(void)
+{
+ struct task_struct *me = current;
+
+ if (__thread_has_fpu(me)) {
+ __thread_clear_has_fpu(me);
+ __save_init_fpu(me);
+ /* We do 'stts()' in __kernel_fpu_end() */
+ } else if (!use_eager_fpu()) {
+ this_cpu_write(fpu_owner_task, NULL);
+ clts();
+ }
+}
+EXPORT_SYMBOL(__kernel_fpu_begin);
-#endif
+void __kernel_fpu_end(void)
+{
+ if (use_eager_fpu()) {
+ /*
+ * For eager fpu, most of the time tsk_used_math() is true.
+ * Restore the user math as we are done with the kernel usage.
+ * In a few instances, during thread exit, signal handling, etc.,
+ * tsk_used_math() is false. Those few places will take proper
+ * actions, so we don't need to restore the math here.
+ */
+ if (likely(tsk_used_math(current)))
+ math_state_restore();
+ } else {
+ stts();
+ }
+}
+EXPORT_SYMBOL(__kernel_fpu_end);
-#ifdef CONFIG_MATH_EMULATION
-#define HAVE_HWFP (boot_cpu_data.hard_math)
-#else
-#define HAVE_HWFP 1
-#endif
+void unlazy_fpu(struct task_struct *tsk)
+{
+ preempt_disable();
+ if (__thread_has_fpu(tsk)) {
+ __save_init_fpu(tsk);
+ __thread_fpu_end(tsk);
+ } else
+ tsk->thread.fpu_counter = 0;
+ preempt_enable();
+}
+EXPORT_SYMBOL(unlazy_fpu);
-static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
+static struct i387_fxsave_struct fx_scratch;
-void mxcsr_feature_mask_init(void)
+static void mxcsr_feature_mask_init(void)
{
unsigned long mask = 0;
- clts();
+
if (cpu_has_fxsr) {
- memset(&current->thread.i387.fxsave, 0,
- sizeof(struct i387_fxsave_struct));
- asm volatile("fxsave %0" : : "m" (current->thread.i387.fxsave));
- mask = current->thread.i387.fxsave.mxcsr_mask;
+ memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
+ asm volatile("fxsave %0" : "+m" (fx_scratch));
+ mask = fx_scratch.mxcsr_mask;
if (mask == 0)
mask = 0x0000ffbf;
}
mxcsr_feature_mask &= mask;
- stts();
}
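
The mask computed here is used wherever a user-supplied MXCSR is accepted, since a set reserved bit would fault the next FXRSTOR. A minimal sketch of that check (the helper name is hypothetical; the kernel open-codes the masking instead):

/* Sketch: reject MXCSR values with reserved bits set on this CPU. */
static bool mxcsr_valid(unsigned int mxcsr)
{
	return (mxcsr & ~mxcsr_feature_mask) == 0;
}
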
-#ifdef CONFIG_X86_64
+static void init_thread_xstate(void)
+{
+ /*
+ * Note that xstate_size might be overwritten later during
+ * xsave_init().
+ */
+
+ if (!cpu_has_fpu) {
+ /*
+ * Disable xsave, as we do not support it when i387
+ * emulation is in use.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ xstate_size = sizeof(struct i387_soft_struct);
+ return;
+ }
+
+ if (cpu_has_fxsr)
+ xstate_size = sizeof(struct i387_fxsave_struct);
+ else
+ xstate_size = sizeof(struct i387_fsave_struct);
+}
+
/*
* Called at bootup to set up the initial FPU state that is later cloned
* into all processes.
*/
-void __cpuinit fpu_init(void)
-{
- unsigned long oldcr0 = read_cr0();
- extern void __bad_fxsave_alignment(void);
- if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
- __bad_fxsave_alignment();
- set_in_cr4(X86_CR4_OSFXSR);
- set_in_cr4(X86_CR4_OSXMMEXCPT);
+void fpu_init(void)
+{
+ unsigned long cr0;
+ unsigned long cr4_mask = 0;
+
+#ifndef CONFIG_MATH_EMULATION
+ if (!cpu_has_fpu) {
+ pr_emerg("No FPU found and no math emulation present\n");
+ pr_emerg("Giving up\n");
+ for (;;)
+ asm volatile("hlt");
+ }
+#endif
+ if (cpu_has_fxsr)
+ cr4_mask |= X86_CR4_OSFXSR;
+ if (cpu_has_xmm)
+ cr4_mask |= X86_CR4_OSXMMEXCPT;
+ if (cr4_mask)
+ set_in_cr4(cr4_mask);
+
+ cr0 = read_cr0();
+ cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
+ if (!cpu_has_fpu)
+ cr0 |= X86_CR0_EM;
+ write_cr0(cr0);
- write_cr0(oldcr0 & ~((1UL<<3)|(1UL<<2))); /* clear TS and EM */
+ /*
+ * init_thread_xstate is only called once to avoid overriding
+ * xstate_size during boot time or during CPU hotplug.
+ */
+ if (xstate_size == 0)
+ init_thread_xstate();
mxcsr_feature_mask_init();
- /* clean state in init */
- current_thread_info()->status = 0;
- clear_used_math();
+ xsave_init();
+ eager_fpu_init();
+}
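
An annotation of the CR0 bits adjusted above (Intel SDM semantics, not part of this patch):

/*
 * Annotation: CR0 bits used by fpu_init().
 *   X86_CR0_EM (bit 2): x87 instructions raise #NM, routing them to
 *                       the emulator; set when no FPU is present.
 *   X86_CR0_TS (bit 3): the next FPU use traps with #NM, which is
 *                       what makes lazy FPU switching possible.
 * Both are cleared at boot; EM is re-set only for FPU-less CPUs.
 */
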
+
+void fpu_finit(struct fpu *fpu)
+{
+ if (!cpu_has_fpu) {
+ finit_soft_fpu(&fpu->state->soft);
+ return;
+ }
+
+ if (cpu_has_fxsr) {
+ fx_finit(&fpu->state->fxsave);
+ } else {
+ struct i387_fsave_struct *fp = &fpu->state->fsave;
+ memset(fp, 0, xstate_size);
+ fp->cwd = 0xffff037fu;
+ fp->swd = 0xffff0000u;
+ fp->twd = 0xffffffffu;
+ fp->fos = 0xffff0000u;
+ }
}
-#endif /* CONFIG_X86_64 */
+EXPORT_SYMBOL_GPL(fpu_finit);
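
The fsave defaults above mirror what FNINIT would produce; a hedged decode of the constants (SDM semantics, my annotation):

/*
 * Annotation: the i387_fsave_struct defaults in fpu_finit().
 *   cwd = 0xffff037f: control word 0x037f = all exceptions masked,
 *                     extended precision, round-to-nearest (FNINIT).
 *   swd = 0xffff0000: status word 0 = no pending exceptions, TOS 0.
 *   twd = 0xffffffff: all tag pairs 11b = every ST register empty.
 * The 0xffff upper halves pad the 16-bit fields in the 32-bit layout.
 */
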
/*
* The _current_ task is using the FPU for the first time
* so initialize it and set the mxcsr to its default
* value at reset if we support XMM instructions and then
- * remeber the current task has used the FPU.
+ * remember the current task has used the FPU.
*/
-void init_fpu(struct task_struct *tsk)
+int init_fpu(struct task_struct *tsk)
{
+ int ret;
+
if (tsk_used_math(tsk)) {
- if (tsk == current)
+ if (cpu_has_fpu && tsk == current)
unlazy_fpu(tsk);
- return;
+ tsk->thread.fpu.last_cpu = ~0;
+ return 0;
}
- if (cpu_has_fxsr) {
- memset(&tsk->thread.i387.fxsave, 0,
- sizeof(struct i387_fxsave_struct));
- tsk->thread.i387.fxsave.cwd = 0x37f;
- if (cpu_has_xmm)
- tsk->thread.i387.fxsave.mxcsr = MXCSR_DEFAULT;
- } else {
- memset(&tsk->thread.i387.fsave, 0,
- sizeof(struct i387_fsave_struct));
- tsk->thread.i387.fsave.cwd = 0xffff037fu;
- tsk->thread.i387.fsave.swd = 0xffff0000u;
- tsk->thread.i387.fsave.twd = 0xffffffffu;
- tsk->thread.i387.fsave.fos = 0xffff0000u;
- }
/*
- * Only the device not available exception or ptrace can call init_fpu.
+ * Memory is allocated at the first use of the FPU and other state.
*/
+ ret = fpu_alloc(&tsk->thread.fpu);
+ if (ret)
+ return ret;
+
+ fpu_finit(&tsk->thread.fpu);
+
set_stopped_child_used_math(tsk);
+ return 0;
}
+EXPORT_SYMBOL_GPL(init_fpu);
+/*
+ * The xstateregs_active() routine is the same as the fpregs_active() routine,
+ * as the "regset->n" for the xstate regset will be updated based on the feature
+ * capabilities supported by xsave.
+ */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
@@ -129,13 +270,19 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
+ int ret;
+
if (!cpu_has_fxsr)
return -ENODEV;
- init_fpu(target);
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ sanitize_i387_state(target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.i387.fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
@@ -147,16 +294,88 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr)
return -ENODEV;
- init_fpu(target);
- set_stopped_child_used_math(target);
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ sanitize_i387_state(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.i387.fxsave, 0, -1);
+ &target->thread.fpu.state->fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
- target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
+
+ /*
+ * update the header bits in the xsave header, indicating the
+ * presence of FP and SSE state.
+ */
+ if (cpu_has_xsave)
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
+
+ return ret;
+}
+
+int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_xsave)
+ return -ENODEV;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ /*
+ * Copy the 48 bytes defined by the software first into the xstate
+ * memory layout in the thread struct, so that we can copy the entire
+ * xstateregs to the user using one user_regset_copyout().
+ */
+ memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
+ xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
+
+ /*
+ * Copy the xstate memory layout.
+ */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.state->xsave, 0, -1);
+ return ret;
+}
+
+int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct xsave_hdr_struct *xsave_hdr;
+
+ if (!cpu_has_xsave)
+ return -ENODEV;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.state->xsave, 0, -1);
+
+ /*
+ * mxcsr reserved bits must be masked to zero for security reasons.
+ */
+ target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
+
+ xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;
+
+ xsave_hdr->xstate_bv &= pcntxt_mask;
+ /*
+ * These bits must be zero.
+ */
+ xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;
return ret;
}
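
These handlers sit behind the regset core; a user-space sketch of reaching them through ptrace (PTRACE_GETREGSET, NT_X86_XSTATE, and the iovec protocol are real; error handling is trimmed):

/* User-space sketch: read a stopped tracee's xsave image. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>		/* NT_X86_XSTATE */

static long read_xstate(pid_t pid, void *buf, size_t size)
{
	struct iovec iov = { .iov_base = buf, .iov_len = size };

	/* On success the kernel trims iov.iov_len to what it wrote. */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov);
}
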
@@ -178,10 +397,11 @@ static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+
return tmp;
}
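
A worked example of this fold (my annotation, not from the patch): the lines of this function before the hunk turn each 2-bit i387 tag into a single "valid" bit at the even positions, and the steps shown then compact those bits into the low byte.

/*
 * Example: only ST0 valid (tag 00b), the rest empty (tag 11b).
 *   twd              = 0xfffc
 *   ~twd             = 0x0003
 *   fold with 0x5555 = 0x0001   one bit per register, even positions
 *   fold with 0x3333 = 0x0001
 *   fold with 0x0f0f = 0x0001
 *   fold with 0x00ff = 0x0001 -> FXSR tag byte: only bit 0 set
 */
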
-#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16);
+#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID 0
#define FP_EXP_TAG_ZERO 1
#define FP_EXP_TAG_SPECIAL 2
@@ -232,10 +452,10 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
* FXSR floating point environment conversions.
*/
-static void convert_from_fxsr(struct user_i387_ia32_struct *env,
- struct task_struct *tsk)
+void
+convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -247,18 +467,17 @@ static void convert_from_fxsr(struct user_i387_ia32_struct *env,
#ifdef CONFIG_X86_64
env->fip = fxsave->rip;
env->foo = fxsave->rdp;
+ /*
+ * should actually be ds/cs at FPU exception time, but
+ * that information is not available in 64-bit mode.
+ */
+ env->fcs = task_pt_regs(tsk)->cs;
if (tsk == current) {
- /*
- * should be actually ds/cs at fpu exception time, but
- * that information is not available in 64bit mode.
- */
- asm("mov %%ds,%0" : "=r" (env->fos));
- asm("mov %%cs,%0" : "=r" (env->fcs));
+ savesegment(ds, env->fos);
} else {
- struct pt_regs *regs = task_pt_regs(tsk);
- env->fos = 0xffff0000 | tsk->thread.ds;
- env->fcs = regs->cs;
+ env->fos = tsk->thread.ds;
}
+ env->fos |= 0xffff0000;
#else
env->fip = fxsave->fip;
env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
@@ -270,11 +489,11 @@ static void convert_from_fxsr(struct user_i387_ia32_struct *env,
memcpy(&to[i], &from[i], sizeof(to[0]));
}
-static void convert_to_fxsr(struct task_struct *tsk,
- const struct user_i387_ia32_struct *env)
+void convert_to_fxsr(struct task_struct *tsk,
+ const struct user_i387_ia32_struct *env)
{
- struct i387_fxsave_struct *fxsave = &tsk->thread.i387.fxsave;
+ struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -303,15 +522,21 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
void *kbuf, void __user *ubuf)
{
struct user_i387_ia32_struct env;
+ int ret;
- if (!HAVE_HWFP)
- return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
- init_fpu(target);
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
if (!cpu_has_fxsr)
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.i387.fsave, 0, -1);
+ &target->thread.fpu.state->fsave, 0,
+ -1);
+
+ sanitize_i387_state(target);
if (kbuf && pos == 0 && count == sizeof(env)) {
convert_from_fxsr(kbuf, target);
@@ -319,6 +544,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
}
convert_from_fxsr(&env, target);
+
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
@@ -329,15 +555,19 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
struct user_i387_ia32_struct env;
int ret;
- if (!HAVE_HWFP)
- return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ sanitize_i387_state(target);
- init_fpu(target);
- set_stopped_child_used_math(target);
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
if (!cpu_has_fxsr)
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.i387.fsave, 0, -1);
+ &target->thread.fpu.state->fsave, 0,
+ -1);
if (pos > 0 || count < sizeof(env))
convert_from_fxsr(&env, target);
@@ -346,112 +576,13 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!ret)
convert_to_fxsr(target, &env);
- return ret;
-}
-
-/*
- * Signal frame handlers.
- */
-
-static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
-{
- struct task_struct *tsk = current;
-
- unlazy_fpu(tsk);
- tsk->thread.i387.fsave.status = tsk->thread.i387.fsave.swd;
- if (__copy_to_user(buf, &tsk->thread.i387.fsave,
- sizeof(struct i387_fsave_struct)))
- return -1;
- return 1;
-}
-
-static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
-{
- struct task_struct *tsk = current;
- struct user_i387_ia32_struct env;
- int err = 0;
-
- unlazy_fpu(tsk);
-
- convert_from_fxsr(&env, tsk);
- if (__copy_to_user(buf, &env, sizeof(env)))
- return -1;
-
- err |= __put_user(tsk->thread.i387.fxsave.swd, &buf->status);
- err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
- if (err)
- return -1;
-
- if (__copy_to_user(&buf->_fxsr_env[0], &tsk->thread.i387.fxsave,
- sizeof(struct i387_fxsave_struct)))
- return -1;
- return 1;
-}
-
-int save_i387_ia32(struct _fpstate_ia32 __user *buf)
-{
- if (!used_math())
- return 0;
-
- /* This will cause a "finit" to be triggered by the next
- * attempted FPU operation by the 'current' process.
+ /*
+ * update the header bit in the xsave header, indicating the
+ * presence of FP.
*/
- clear_used_math();
-
- if (HAVE_HWFP) {
- if (cpu_has_fxsr) {
- return save_i387_fxsave(buf);
- } else {
- return save_i387_fsave(buf);
- }
- } else {
- return fpregs_soft_get(current, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
- NULL, buf) ? -1 : 1;
- }
-}
-
-static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
-{
- struct task_struct *tsk = current;
- clear_fpu(tsk);
- return __copy_from_user(&tsk->thread.i387.fsave, buf,
- sizeof(struct i387_fsave_struct));
-}
-
-static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf)
-{
- int err;
- struct task_struct *tsk = current;
- struct user_i387_ia32_struct env;
- clear_fpu(tsk);
- err = __copy_from_user(&tsk->thread.i387.fxsave, &buf->_fxsr_env[0],
- sizeof(struct i387_fxsave_struct));
- /* mxcsr reserved bits must be masked to zero for security reasons */
- tsk->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
- if (err || __copy_from_user(&env, buf, sizeof(env)))
- return 1;
- convert_to_fxsr(tsk, &env);
- return 0;
-}
-
-int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
-{
- int err;
-
- if (HAVE_HWFP) {
- if (cpu_has_fxsr) {
- err = restore_i387_fxsave(buf);
- } else {
- err = restore_i387_fsave(buf);
- }
- } else {
- err = fpregs_soft_set(current, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
- NULL, buf) != 0;
- }
- set_used_math();
- return err;
+ if (cpu_has_xsave)
+ target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
+ return ret;
}
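
fpregs_get()/fpregs_set() back the classic 32-bit FP regset; a hedged user-space sketch of the tracer side (i386 glibc types; error handling trimmed):

/* User-space sketch (32-bit tracer): fetch the legacy i387 image. */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>		/* struct user_fpregs_struct */

static long read_i387(pid_t pid, struct user_fpregs_struct *fp)
{
	return ptrace(PTRACE_GETFPREGS, pid, NULL, fp);
}
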
/*
@@ -463,8 +594,8 @@ int restore_i387_ia32(struct _fpstate_ia32 __user *buf)
*/
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
- int fpvalid;
struct task_struct *tsk = current;
+ int fpvalid;
fpvalid = !!used_math();
if (fpvalid)
@@ -477,3 +608,33 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
EXPORT_SYMBOL(dump_fpu);
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
+
+static int __init no_387(char *s)
+{
+ setup_clear_cpu_cap(X86_FEATURE_FPU);
+ return 1;
+}
+
+__setup("no387", no_387);
+
+void fpu_detect(struct cpuinfo_x86 *c)
+{
+ unsigned long cr0;
+ u16 fsw, fcw;
+
+ fsw = fcw = 0xffff;
+
+ cr0 = read_cr0();
+ cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
+ write_cr0(cr0);
+
+ asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+ : "+m" (fsw), "+m" (fcw));
+
+ if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+ set_cpu_cap(c, X86_FEATURE_FPU);
+ else
+ clear_cpu_cap(c, X86_FEATURE_FPU);
+
+ /* The final cr0 value is set in fpu_init() */
+}
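
The probe relies on the memory operands keeping their 0xffff preload when no FPU answers; a user-space analogue for illustration (on anything that can run it, it will of course report an FPU):

/* Sketch: the same FNINIT/FNSTSW/FNSTCW probe as a user program. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t fsw = 0xffff, fcw = 0xffff;

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	/* FNINIT leaves status 0 and control 0x037f. */
	puts(fsw == 0 && (fcw & 0x103f) == 0x003f ? "FPU" : "no FPU");
	return 0;
}
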