Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/fpu/core.c  | 38
-rw-r--r--  arch/x86/kernel/fpu/xsave.c | 11
-rw-r--r--  arch/x86/kernel/signal.c    |  8
3 files changed, 36 insertions(+), 21 deletions(-)
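
This patch converts the PF_USED_MATH bit in task_struct::flags into a
dedicated fpstate_active field in struct fpu, so that "has this task ever
used the FPU?" is tracked next to the FPU state it guards rather than in
the shared per-task flags word. A minimal standalone sketch of the
before/after pattern (the types below are simplified stand-ins, not the
kernel's real definitions):

#include <stdio.h>

#define PF_USED_MATH 0x00002000		/* old: a bit in task->flags */

struct fpu {
	unsigned char fpstate_active;	/* new: a field on the FPU context */
};

struct thread_struct {
	struct fpu fpu;
};

struct task_struct {
	unsigned long flags;
	struct thread_struct thread;
};

int main(void)
{
	struct task_struct task = { 0 };
	struct fpu *fpu = &task.thread.fpu;

	/* Old style: test a bit in the task-wide flags word. */
	task.flags |= PF_USED_MATH;
	printf("old check: %d\n", !!(task.flags & PF_USED_MATH));

	/* New style: test the field on the FPU context itself. */
	fpu->fpstate_active = 1;
	printf("new check: %d\n", fpu->fpstate_active);

	return 0;
}
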
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 779813126f49..9e7f9e7b2cca 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -236,14 +236,17 @@ static void fpu_copy(struct task_struct *dst, struct task_struct *src)
int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
+ struct fpu *dst_fpu = &dst->thread.fpu;
+ struct fpu *src_fpu = &src->thread.fpu;
+
dst->thread.fpu.counter = 0;
dst->thread.fpu.has_fpu = 0;
dst->thread.fpu.state = NULL;
task_disable_lazy_fpu_restore(dst);
- if (src->flags & PF_USED_MATH) {
- int err = fpstate_alloc(&dst->thread.fpu);
+ if (src_fpu->fpstate_active) {
+ int err = fpstate_alloc(dst_fpu);
if (err)
return err;
@@ -260,11 +263,12 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
*/
int fpstate_alloc_init(struct task_struct *curr)
{
+ struct fpu *fpu = &curr->thread.fpu;
int ret;
if (WARN_ON_ONCE(curr != current))
return -EINVAL;
- if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
+ if (WARN_ON_ONCE(fpu->fpstate_active))
return -EINVAL;
/*
@@ -277,7 +281,7 @@ int fpstate_alloc_init(struct task_struct *curr)
fpstate_init(&curr->thread.fpu);
/* Safe to do for the current task: */
- curr->flags |= PF_USED_MATH;
+ fpu->fpstate_active = 1;
return 0;
}
@@ -308,12 +312,13 @@ EXPORT_SYMBOL_GPL(fpstate_alloc_init);
*/
static int fpu__unlazy_stopped(struct task_struct *child)
{
+ struct fpu *child_fpu = &child->thread.fpu;
int ret;
if (WARN_ON_ONCE(child == current))
return -EINVAL;
- if (child->flags & PF_USED_MATH) {
+ if (child_fpu->fpstate_active) {
task_disable_lazy_fpu_restore(child);
return 0;
}
@@ -328,7 +333,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
fpstate_init(&child->thread.fpu);
/* Safe to do for stopped child tasks: */
- child->flags |= PF_USED_MATH;
+ child_fpu->fpstate_active = 1;
return 0;
}
@@ -348,7 +353,7 @@ void fpu__restore(void)
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
- if (!(tsk->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
local_irq_enable();
/*
* does a slab alloc which can sleep
@@ -378,6 +383,8 @@ EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__flush_thread(struct task_struct *tsk)
{
+ struct fpu *fpu = &tsk->thread.fpu;
+
WARN_ON(tsk != current);
if (!use_eager_fpu()) {
@@ -385,7 +392,7 @@ void fpu__flush_thread(struct task_struct *tsk)
drop_fpu(tsk);
fpstate_free(&tsk->thread.fpu);
} else {
- if (!(tsk->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
/* kthread execs. TODO: cleanup this horror. */
if (WARN_ON(fpstate_alloc_init(tsk)))
force_sig(SIGKILL, tsk);
@@ -402,12 +409,16 @@ void fpu__flush_thread(struct task_struct *tsk)
*/
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- return (target->flags & PF_USED_MATH) ? regset->n : 0;
+ struct fpu *target_fpu = &target->thread.fpu;
+
+ return target_fpu->fpstate_active ? regset->n : 0;
}
int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- return (cpu_has_fxsr && (target->flags & PF_USED_MATH)) ? regset->n : 0;
+ struct fpu *target_fpu = &target->thread.fpu;
+
+ return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
@@ -733,16 +744,17 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
* struct user_i387_struct) but is in fact only used for 32-bit
* dumps, so on 64-bit it is really struct user_i387_ia32_struct.
*/
-int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
+int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
- fpvalid = !!(tsk->flags & PF_USED_MATH);
+ fpvalid = fpu->fpstate_active;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),
- fpu, NULL);
+ ufpu, NULL);
return fpvalid;
}
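
The core.c hunks above all follow the same ordering: the fpstate buffer is
allocated and initialized first, and fpstate_active is set only afterwards
(fpstate_alloc_init() for the current task, fpu__unlazy_stopped() for
stopped children), so the flag is never observed set while the state it
promises is missing. A rough sketch of that activation order, using
hypothetical simplified helpers rather than the kernel's API:

#include <stdlib.h>

struct fpstate { unsigned char regs[512]; };	/* illustrative size */

struct fpu {
	int fpstate_active;
	struct fpstate *state;
};

/* Sketch of the alloc -> init -> activate order used above. */
static int fpu_activate(struct fpu *fpu)
{
	if (fpu->fpstate_active)
		return -1;		/* caller bug, like the WARN_ON_ONCE() */

	fpu->state = calloc(1, sizeof(*fpu->state));
	if (!fpu->state)
		return -1;		/* allocation failed */

	fpu->fpstate_active = 1;	/* publish only once the state is valid */
	return 0;
}

/* Sketch of fpregs_active(): the flag alone says whether state exists. */
static int fpu_regs_present(const struct fpu *fpu)
{
	return fpu->fpstate_active;
}
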
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 8cd127049c9b..dc346e19c0df 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -334,6 +334,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
int state_size = xstate_size;
u64 xstate_bv = 0;
int fx_only = 0;
@@ -349,7 +350,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
- if (!(tsk->flags & PF_USED_MATH) && fpstate_alloc_init(tsk))
+ if (!fpu->fpstate_active && fpstate_alloc_init(tsk))
return -1;
if (!static_cpu_has(X86_FEATURE_FPU))
@@ -384,12 +385,12 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
int err = 0;
/*
- * Drop the current fpu which clears PF_USED_MATH. This ensures
+ * Drop the current fpu which clears fpu->fpstate_active. This ensures
* that any context-switch during the copy of the new state,
* avoids the intermediate state from getting restored/saved.
* Thus avoiding the new restored state from getting corrupted.
* We will be ready to restore/save the state only after
- * PF_USED_MATH is again set.
+ * fpu->fpstate_active is again set.
*/
drop_fpu(tsk);
@@ -401,7 +402,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
}
- tsk->flags |= PF_USED_MATH;
+ fpu->fpstate_active = 1;
if (use_eager_fpu()) {
preempt_disable();
fpu__restore();
@@ -685,7 +686,7 @@ void xsave_init(void)
*/
void __init_refok eager_fpu_init(void)
{
- WARN_ON(current->flags & PF_USED_MATH);
+ WARN_ON(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;
if (eagerfpu == ENABLE)
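
The __restore_xstate_sig() hunks depend on ordering: drop_fpu() clears
fpstate_active before the new user state is copied in, and the flag is set
again only after the copy completes, so a context switch in between never
saves or restores a half-written buffer. A simplified illustration, where
the memcpy stands in for the real copy-from-user and sanitize steps:

#include <string.h>

struct fpu {
	int fpstate_active;
	unsigned char state[512];	/* illustrative xstate buffer */
};

static void restore_sig_state(struct fpu *fpu,
			      const unsigned char *buf, size_t size)
{
	/* Drop: with the flag clear, context switches skip this state. */
	fpu->fpstate_active = 0;

	/* The copy may be preempted; the intermediate state is never used. */
	memcpy(fpu->state, buf, size);

	/* Only now may the state be saved/restored again. */
	fpu->fpstate_active = 1;
}
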
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 8e2529ebb8c6..20a9d355af59 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -198,6 +198,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
unsigned long sp = regs->sp;
unsigned long buf_fx = 0;
int onsigstack = on_sig_stack(sp);
+ struct fpu *fpu = &current->thread.fpu;
/* redzone */
if (config_enabled(CONFIG_X86_64))
@@ -217,7 +218,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
}
}
- if (current->flags & PF_USED_MATH) {
+ if (fpu->fpstate_active) {
sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
@@ -233,7 +234,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
return (void __user *)-1L;
/* save i387 and extended state */
- if ((current->flags & PF_USED_MATH) &&
+ if (fpu->fpstate_active &&
save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0)
return (void __user *)-1L;
@@ -616,6 +617,7 @@ static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
bool stepping, failed;
+ struct fpu *fpu = &current->thread.fpu;
/* Are we from a system call? */
if (syscall_get_nr(current, regs) >= 0) {
@@ -664,7 +666,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/*
* Ensure the signal handler starts with the new fpu state.
*/
- if (current->flags & PF_USED_MATH)
+ if (fpu->fpstate_active)
fpu_reset_state(current);
}
signal_setup_done(failed, ksig, stepping);
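
In signal.c, fpstate_active now gates both sides of signal-frame setup:
get_sigframe() reserves user-stack space for the math frame only when the
task actually has FPU state, and save_xstate_sig() fills that slot under
the same condition. A hypothetical sketch of the stack-layout logic (the
size constant and alignment are illustrative, not the kernel's values):

#include <stddef.h>

struct fpu { int fpstate_active; };

#define MATH_FRAME_SIZE 512UL	/* illustrative fxsave-area size */

/* Carve a signal frame out of a downward-growing user stack. */
static unsigned long place_sigframe(unsigned long sp, size_t frame_size,
				    const struct fpu *fpu,
				    unsigned long *fpstate)
{
	*fpstate = 0;

	if (fpu->fpstate_active) {	/* was: current->flags & PF_USED_MATH */
		sp -= MATH_FRAME_SIZE;	/* room for the saved FPU state */
		sp &= ~15UL;		/* fxsave wants 16-byte alignment */
		*fpstate = sp;
	}

	sp -= frame_size;		/* then the register frame itself */
	return sp & ~15UL;
}
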