x86/fpu: Synchronize the naming of drop_fpu() and fpu_reset_state()
x86/fpu: Synchronize the naming of drop_fpu() and fpu_reset_state()

drop_fpu() and fpu_reset_state() are similar in functionality and in scope, yet this is not apparent from their names.

drop_fpu() deactivates FPU contents (both the fpregs and the fpstate), but leaves register contents intact in the eager-FPU case, mostly as an optimization. It disables fpregs in the lazy-FPU case. The drop_fpu() method can be used to destroy FPU state in an optimized way, when we know that a new state will be loaded before user-space might see any remains of the old FPU state:

- such as in sys_exit()'s exit_thread(), where we know this task won't execute any user-space instructions anymore and the next context switch cleans up the FPU. The old FPU state might still be around in the eagerfpu case but won't be saved.

- in __restore_xstate_sig(), where we use drop_fpu() before copying a new state into the fpstate and activating that one. No user-space instructions can execute between those steps.

- in sys_execve()'s fpu__clear(): there we use drop_fpu() in the !eagerfpu case, where it's equivalent to a full reinit.

fpu_reset_state() is a stronger version of drop_fpu(): both in the eagerfpu and the lazy-FPU case it guarantees that fpregs are reinitialized to init state. This method is used in cases where we need a full reset:

- handle_signal() uses fpu_reset_state() to reset the FPU state to init before executing a user-space signal handler. While we have already saved the original FPU state at this point, and always restore the original state, the signal handling code still has to do this reinit, because signals may interrupt any user-space instruction, and the FPU might be in various intermediate states (such as an unbalanced x87 stack) that are not immediately usable for general C signal handler code.

- __restore_xstate_sig() uses fpu_reset_state() when the signal frame has no FP context. Since the signal handler may have modified the FPU state, it gets reset back to init state.

- in another branch __restore_xstate_sig() uses fpu_reset_state() to handle a restoration error: when restore_user_xstate() fails to restore FPU state and we might have inconsistent FPU data, fpu_reset_state() is used to reset it back to a known good state.

- __kernel_fpu_end() uses fpu_reset_state() in an error branch. This is in a 'must not trigger' error branch, so on bug-free kernels this never triggers.

- fpu__restore() uses fpu_reset_state() in an error path as well: if the fpstate was set up with invalid FPU state (via ptrace or via a signal handler), then it's reset back to init state.

- likewise, the scheduler's switch_fpu_finish() uses it in a restoration error path too.

Move both drop_fpu() and fpu_reset_state() to the fpu__*() namespace and harmonize their naming with their function:

    fpu__drop()
    fpu__reset()

This clearly shows that both methods operate on the full state of the FPU, just like fpu__restore().

Also add comments to explain what each function does.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 5e907bb045
commit 5033861575
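Before the diff, a short reading aid: a minimal caller sketch, not part of this commit (the helper choose_fpu_teardown() and its boolean parameter are purely hypothetical), showing how the distinction drawn in the commit message plays out after the rename. fpu__drop() is the cheap option when a complete state load is guaranteed before user space can observe the registers; fpu__reset() is the strong option that leaves the fpregs in init state in both the eager and the lazy case.

/*
 * Hypothetical sketch, not part of this commit: which helper a caller
 * would pick after the rename, per the commit message above.
 */
static void choose_fpu_teardown(struct fpu *fpu, bool full_restore_follows)
{
	if (full_restore_follows) {
		/*
		 * Cheap path: deactivate the fpregs and the fpstate. In the
		 * eager-FPU case stale contents may remain in the registers,
		 * which is fine because a complete state load happens before
		 * any user-space instruction runs (e.g. exit_thread(),
		 * __restore_xstate_sig()).
		 */
		fpu__drop(fpu);
	} else {
		/*
		 * Strong path: user space may run with whatever is in the
		 * registers, so guarantee they are reinitialized to init
		 * state (e.g. handle_signal(), fpu__restore() error paths).
		 */
		fpu__reset(fpu);
	}
}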
@@ -382,11 +382,17 @@ static inline void fpregs_deactivate(struct fpu *fpu)
 	__fpregs_deactivate_hw();
 }
 
-static inline void drop_fpu(struct fpu *fpu)
-{
-	/*
-	 * Forget coprocessor state..
-	 */
+/*
+ * Drops current FPU state: deactivates the fpregs and
+ * the fpstate. NOTE: it still leaves previous contents
+ * in the fpregs in the eager-FPU case.
+ *
+ * This function can be used in cases where we know that
+ * a state-restore is coming: either an explicit one,
+ * or a reschedule.
+ */
+static inline void fpu__drop(struct fpu *fpu)
+{
 	preempt_disable();
 	fpu->counter = 0;
 
@@ -412,13 +418,12 @@ static inline void restore_init_xstate(void)
 }
 
 /*
- * Reset the FPU state in the eager case and drop it in the lazy case (later use
- * will reinit it).
+ * Reset the FPU state back to init state.
  */
-static inline void fpu_reset_state(struct fpu *fpu)
+static inline void fpu__reset(struct fpu *fpu)
 {
 	if (!use_eager_fpu())
-		drop_fpu(fpu);
+		fpu__drop(fpu);
 	else
 		restore_init_xstate();
 }
@@ -516,7 +521,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
 	if (fpu_switch.preload) {
 		if (unlikely(restore_fpu_checking(new_fpu)))
-			fpu_reset_state(new_fpu);
+			fpu__reset(new_fpu);
 	}
 }
 
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
 
 	if (fpu->fpregs_active) {
 		if (WARN_ON(restore_fpu_checking(fpu)))
-			fpu_reset_state(fpu);
+			fpu__reset(fpu);
 	} else {
 		__fpregs_deactivate_hw();
 	}
@@ -339,7 +339,7 @@ void fpu__restore(void)
 	kernel_fpu_disable();
 	fpregs_activate(fpu);
 	if (unlikely(restore_fpu_checking(fpu))) {
-		fpu_reset_state(fpu);
+		fpu__reset(fpu);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 	} else {
 		tsk->thread.fpu.counter++;
@@ -360,7 +360,7 @@ void fpu__clear(struct task_struct *tsk)
 
 	if (!use_eager_fpu()) {
 		/* FPU state will be reallocated lazily at the first use. */
-		drop_fpu(fpu);
+		fpu__drop(fpu);
 	} else {
 		if (!fpu->fpstate_active) {
 			fpu__activate_curr(fpu);
@@ -401,7 +401,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		config_enabled(CONFIG_IA32_EMULATION));
 
 	if (!buf) {
-		fpu_reset_state(fpu);
+		fpu__reset(fpu);
 		return 0;
 	}
 
@@ -449,7 +449,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		 * We will be ready to restore/save the state only after
		 * fpu->fpstate_active is again set.
 		 */
-		drop_fpu(fpu);
+		fpu__drop(fpu);
 
 		if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
 		    __copy_from_user(&env, buf, sizeof(env))) {
@@ -474,7 +474,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		 */
 		user_fpu_begin();
 		if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
-			fpu_reset_state(fpu);
+			fpu__reset(fpu);
 			return -1;
 		}
 	}
@@ -110,7 +110,7 @@ void exit_thread(void)
 		kfree(bp);
 	}
 
-	drop_fpu(fpu);
+	fpu__drop(fpu);
 }
 
 void flush_thread(void)
@@ -667,7 +667,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		 * Ensure the signal handler starts with the new fpu state.
 		 */
 		if (fpu->fpstate_active)
-			fpu_reset_state(fpu);
+			fpu__reset(fpu);
 	}
 	signal_setup_done(failed, ksig, stepping);
 }