Commit 2722146e authored by Sebastian Andrzej Siewior, committed by Borislav Petkov

x86/fpu: Remove fpu->initialized

The struct fpu.initialized member is always set to one for user tasks
and zero for kernel tasks. This avoids saving/restoring the FPU
registers for kernel threads.

The ->initialized = 0 case for user tasks has been removed in previous
changes, for instance by doing an explicit, unconditional init at fork()
time for FPU-less systems, where it was otherwise delayed until the first
emulated opcode was handled.

The context switch code (switch_fpu_prepare() + switch_fpu_finish())
can't unconditionally save/restore registers for kernel threads. Not
only would it slow down the switch, it would also load a zeroed
xcomp_bv for XSAVES.

For kernel_fpu_begin() (and kernel_fpu_end()) the situation is similar:
EFI with runtime services uses it before alternatives_patched is true,
which means the function is now used earlier than it used to be.

For those two cases, use current->mm to distinguish between user and
kernel threads. For kernel_fpu_begin(), skip the save/restore of the FPU
registers.
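
A condensed sketch of the resulting begin/end pair (taken from the
fpu/core.c hunks below; everything except the current->mm checks and the
save/restore calls is trimmed):

  static void __kernel_fpu_begin(void)
  {
          struct fpu *fpu = &current->thread.fpu;

          kernel_fpu_disable();

          /* Only user tasks (current->mm != NULL) have FPU state worth preserving. */
          if (current->mm) {
                  /* Ignore the return value -- clobbered register state is fine here. */
                  copy_fpregs_to_fpstate(fpu);
          }
  }

  static void __kernel_fpu_end(void)
  {
          struct fpu *fpu = &current->thread.fpu;

          if (current->mm)
                  copy_kernel_to_fpregs(&fpu->state);

          kernel_fpu_enable();
  }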

During the context switch into a kernel thread don't do anything. There
is no reason to save the FPU state of a kernel thread.
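
The context-switch helpers then look roughly like this (condensed from
the fpu/internal.h hunks below; the trace call is omitted):

  static inline void
  switch_fpu_prepare(struct fpu *old_fpu, int cpu)
  {
          /* Only save for user tasks; kernel threads (current->mm == NULL) are skipped. */
          if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
                  if (!copy_fpregs_to_fpstate(old_fpu))
                          old_fpu->last_cpu = -1;
                  else
                          old_fpu->last_cpu = cpu;
          }
  }

  static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
  {
          if (static_cpu_has(X86_FEATURE_FPU)) {
                  if (!fpregs_state_valid(new_fpu, cpu)) {
                          /* Restore registers only when switching to a user task. */
                          if (current->mm)
                                  copy_kernel_to_fpregs(&new_fpu->state);
                  }

                  fpregs_activate(new_fpu);
          }
  }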

The reordering in __switch_to() is important because the current()
pointer needs to be valid before switch_fpu_finish() is invoked, so that
->mm of the new task is seen rather than that of the old one.
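
In the 64-bit __switch_to() (see the process_64.c hunk below) the
resulting order is, abridged:

  this_cpu_write(current_task, next_p);
  this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

  /* current is now next_p, so current->mm refers to the incoming task: */
  switch_fpu_finish(next_fpu, cpu);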

N.B.: fpu__save() doesn't need to check ->mm because it is called by
user tasks only.

 [ bp: Massage. ]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aubrey Li <aubrey.li@intel.com>
Cc: Babu Moger <Babu.Moger@amd.com>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: Dmitry Safonov <dima@arista.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Nicolai Stange <nstange@suse.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190403164156.19645-8-bigeasy@linutronix.de
parent 39388e80
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -216,8 +216,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 				 size_t frame_size,
 				 void __user **fpstate)
 {
-	struct fpu *fpu = &current->thread.fpu;
-	unsigned long sp;
+	unsigned long sp, fx_aligned, math_size;
 
 	/* Default to using normal stack */
 	sp = regs->sp;
@@ -231,15 +230,11 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 		 ksig->ka.sa.sa_restorer)
 		sp = (unsigned long) ksig->ka.sa.sa_restorer;
 
-	if (fpu->initialized) {
-		unsigned long fx_aligned, math_size;
-
-		sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
-		*fpstate = (struct _fpstate_32 __user *) sp;
-		if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
-					     math_size) < 0)
-			return (void __user *) -1L;
-	}
+	sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
+	*fpstate = (struct _fpstate_32 __user *) sp;
+	if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
+				     math_size) < 0)
+		return (void __user *) -1L;
 
 	sp -= frame_size;
 
 	/* Align the stack pointer according to the i386 ABI,
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -494,11 +494,14 @@ static inline void fpregs_activate(struct fpu *fpu)
  *
  *  - switch_fpu_finish() restores the new state as
  *    necessary.
+ *
+ * The FPU context is only stored/restored for a user task and
+ * ->mm is used to distinguish between kernel and user threads.
  */
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
+	if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
@@ -506,8 +509,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 
 		/* But leave fpu_fpregs_owner_ctx! */
 		trace_x86_fpu_regs_deactivated(old_fpu);
-	} else
-		old_fpu->last_cpu = -1;
+	}
 }
 
 /*
@@ -520,12 +522,12 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
  */
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
-	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-		       new_fpu->initialized;
-
-	if (preload) {
-		if (!fpregs_state_valid(new_fpu, cpu))
-			copy_kernel_to_fpregs(&new_fpu->state);
+	if (static_cpu_has(X86_FEATURE_FPU)) {
+		if (!fpregs_state_valid(new_fpu, cpu)) {
+			if (current->mm)
+				copy_kernel_to_fpregs(&new_fpu->state);
+		}
+
 		fpregs_activate(new_fpu);
 	}
 }
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -293,15 +293,6 @@ struct fpu {
 	 */
 	unsigned int			last_cpu;
 
-	/*
-	 * @initialized:
-	 *
-	 * This flag indicates whether this context is initialized: if the task
-	 * is not running then we can restore from this context, if the task
-	 * is running then we should save into this context.
-	 */
-	unsigned char			initialized;
-
 	/*
 	 * @avx512_timestamp:
 	 *
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -13,22 +13,19 @@ DECLARE_EVENT_CLASS(x86_fpu,
 
 	TP_STRUCT__entry(
 		__field(struct fpu *, fpu)
-		__field(bool, initialized)
 		__field(u64, xfeatures)
 		__field(u64, xcomp_bv)
 		),
 
 	TP_fast_assign(
 		__entry->fpu		= fpu;
-		__entry->initialized	= fpu->initialized;
 		if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
 			__entry->xfeatures = fpu->state.xsave.header.xfeatures;
 			__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
 		}
 	),
 
-	TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
+	TP_printk("x86/fpu: %p xfeatures: %llx xcomp_bv: %llx",
 			__entry->fpu,
-			__entry->initialized,
 			__entry->xfeatures,
 			__entry->xcomp_bv
 	)
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -101,7 +101,7 @@ static void __kernel_fpu_begin(void)
 	kernel_fpu_disable();
 
-	if (fpu->initialized) {
+	if (current->mm) {
 		/*
 		 * Ignore return value -- we don't care if reg state
 		 * is clobbered.
 		 */
@@ -116,7 +116,7 @@ static void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (fpu->initialized)
+	if (current->mm)
 		copy_kernel_to_fpregs(&fpu->state);
 
 	kernel_fpu_enable();
@@ -147,11 +147,10 @@ void fpu__save(struct fpu *fpu)
 
 	preempt_disable();
 	trace_x86_fpu_before_save(fpu);
-	if (fpu->initialized) {
-		if (!copy_fpregs_to_fpstate(fpu)) {
-			copy_kernel_to_fpregs(&fpu->state);
-		}
-	}
+
+	if (!copy_fpregs_to_fpstate(fpu))
+		copy_kernel_to_fpregs(&fpu->state);
+
 	trace_x86_fpu_after_save(fpu);
 	preempt_enable();
 }
@@ -190,7 +189,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
 	dst_fpu->last_cpu = -1;
 
-	if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
+	if (!static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -227,14 +226,10 @@ static void fpu__initialize(struct fpu *fpu)
 {
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	if (!fpu->initialized) {
-		fpstate_init(&fpu->state);
-		trace_x86_fpu_init_state(fpu);
+	fpstate_init(&fpu->state);
+	trace_x86_fpu_init_state(fpu);
 
-		trace_x86_fpu_activate_state(fpu);
-		/* Safe to do for the current task: */
-		fpu->initialized = 1;
-	}
+	trace_x86_fpu_activate_state(fpu);
 }
 
 /*
@@ -247,32 +242,20 @@ static void fpu__initialize(struct fpu *fpu)
  *
  * - or it's called for stopped tasks (ptrace), in which case the
  *   registers were already saved by the context-switch code when
- *   the task scheduled out - we only have to initialize the registers
- *   if they've never been initialized.
+ *   the task scheduled out.
  *
  * If the task has used the FPU before then save it.
  */
 void fpu__prepare_read(struct fpu *fpu)
 {
-	if (fpu == &current->thread.fpu) {
+	if (fpu == &current->thread.fpu)
 		fpu__save(fpu);
-	} else {
-		if (!fpu->initialized) {
-			fpstate_init(&fpu->state);
-			trace_x86_fpu_init_state(fpu);
-
-			trace_x86_fpu_activate_state(fpu);
-			/* Safe to do for current and for stopped child tasks: */
-			fpu->initialized = 1;
-		}
-	}
 }
 
 /*
  * This function must be called before we write a task's fpstate.
  *
- * If the task has used the FPU before then invalidate any cached FPU registers.
- * If the task has not used the FPU before then initialize its fpstate.
+ * Invalidate any cached FPU registers.
  *
  * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
@@ -289,17 +272,8 @@ void fpu__prepare_write(struct fpu *fpu)
 	 */
 	WARN_ON_FPU(fpu == &current->thread.fpu);
 
-	if (fpu->initialized) {
-		/* Invalidate any cached state: */
-		__fpu_invalidate_fpregs_state(fpu);
-	} else {
-		fpstate_init(&fpu->state);
-		trace_x86_fpu_init_state(fpu);
-
-		trace_x86_fpu_activate_state(fpu);
-		/* Safe to do for stopped child tasks: */
-		fpu->initialized = 1;
-	}
+	/* Invalidate any cached state: */
+	__fpu_invalidate_fpregs_state(fpu);
 }
 
 /*
@@ -316,17 +290,13 @@ void fpu__drop(struct fpu *fpu)
 	preempt_disable();
 
 	if (fpu == &current->thread.fpu) {
-		if (fpu->initialized) {
-			/* Ignore delayed exceptions from user space */
-			asm volatile("1: fwait\n"
-				     "2:\n"
-				     _ASM_EXTABLE(1b, 2b));
-			fpregs_deactivate(fpu);
-		}
+		/* Ignore delayed exceptions from user space */
+		asm volatile("1: fwait\n"
+			     "2:\n"
+			     _ASM_EXTABLE(1b, 2b));
+		fpregs_deactivate(fpu);
 	}
 
-	fpu->initialized = 0;
-
 	trace_x86_fpu_dropped(fpu);
 
 	preempt_enable();
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -239,8 +239,6 @@ static void __init fpu__init_system_ctx_switch(void)
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
-
-	WARN_ON_FPU(current->thread.fpu.initialized);
 }
 
 /*
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -15,16 +15,12 @@
  */
 int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
-	struct fpu *target_fpu = &target->thread.fpu;
-
-	return target_fpu->initialized ? regset->n : 0;
+	return regset->n;
 }
 
 int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
-	struct fpu *target_fpu = &target->thread.fpu;
-
-	if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
+	if (boot_cpu_has(X86_FEATURE_FXSR))
 		return regset->n;
 	else
 		return 0;
@@ -370,16 +366,9 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
 {
 	struct task_struct *tsk = current;
-	struct fpu *fpu = &tsk->thread.fpu;
-	int fpvalid;
 
-	fpvalid = fpu->initialized;
-	if (fpvalid)
-		fpvalid = !fpregs_get(tsk, NULL,
-				      0, sizeof(struct user_i387_ia32_struct),
-				      ufpu, NULL);
-
-	return fpvalid;
+	return !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct),
+			   ufpu, NULL);
 }
 
 EXPORT_SYMBOL(dump_fpu);
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -892,8 +892,6 @@ const void *get_xsave_field_ptr(int xsave_state)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (!fpu->initialized)
-		return NULL;
 	/*
 	 * fpu__save() takes the CPU's xstate registers
 	 * and saves them off to the 'fpu memory buffer.
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -288,10 +288,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (prev->gs | next->gs)
 		lazy_load_gs(next->gs);
 
-	switch_fpu_finish(next_fpu, cpu);
-
 	this_cpu_write(current_task, next_p);
 
+	switch_fpu_finish(next_fpu, cpu);
+
 	/* Load the Intel cache allocation PQR MSR. */
 	resctrl_sched_in();
 
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -566,14 +566,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	x86_fsgsbase_load(prev, next);
 
-	switch_fpu_finish(next_fpu, cpu);
-
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
 	this_cpu_write(current_task, next_p);
 	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
+	switch_fpu_finish(next_fpu, cpu);
+
 	/* Reload sp0. */
 	update_task_stack(next_p);
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -246,7 +246,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 	unsigned long sp = regs->sp;
 	unsigned long buf_fx = 0;
 	int onsigstack = on_sig_stack(sp);
-	struct fpu *fpu = &current->thread.fpu;
+	int ret;
 
 	/* redzone */
 	if (IS_ENABLED(CONFIG_X86_64))
@@ -265,11 +265,9 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		sp = (unsigned long) ka->sa.sa_restorer;
 	}
 
-	if (fpu->initialized) {
-		sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
-					  &buf_fx, &math_size);
-		*fpstate = (void __user *)sp;
-	}
+	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
+				  &buf_fx, &math_size);
+	*fpstate = (void __user *)sp;
 
 	sp = align_sigframe(sp - frame_size);
 
@@ -281,8 +279,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		return (void __user *)-1L;
 
 	/* save i387 and extended state */
-	if (fpu->initialized &&
-	    copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
+	ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
+	if (ret < 0)
 		return (void __user *)-1L;
 
 	return (void __user *)sp;
@@ -763,8 +761,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		/*
 		 * Ensure the signal handler starts with the new fpu state.
 		 */
-		if (fpu->initialized)
-			fpu__clear(fpu);
+		fpu__clear(fpu);
 	}
 	signal_setup_done(failed, ksig, stepping);
 }
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -39,17 +39,12 @@ int __execute_only_pkey(struct mm_struct *mm)
 	 * dance to set PKRU if we do not need to. Check it
 	 * first and assume that if the execute-only pkey is
 	 * write-disabled that we do not have to set it
-	 * ourselves. We need preempt off so that nobody
-	 * can make fpregs inactive.
+	 * ourselves.
 	 */
-	preempt_disable();
 	if (!need_to_set_mm_pkey &&
-	    current->thread.fpu.initialized &&
 	    !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
-		preempt_enable();
 		return execute_only_pkey;
 	}
-	preempt_enable();
 
 	/*
 	 * Set up PKRU so that it denies access for everything