Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-24 21:24:00 +08:00
b1a74bf821
Preemption is disabled between kernel_fpu_begin/end() and as such it is not a good idea to use these routines in kvm_load/put_guest_fpu() which can be very far apart.

kvm_load/put_guest_fpu() routines are already called with preemption disabled and KVM already uses the preempt notifier to save the guest fpu state using kvm_put_guest_fpu().

So introduce __kernel_fpu_begin/end() routines which don't touch preemption and use them instead of kernel_fpu_begin/end() for KVM's use model of saving/restoring guest FPU state.

Also with this change (and with the eagerFPU model), fix the host cr0.TS vm-exit state in the case of VMX. For the eagerFPU case, host cr0.TS is always clear, so there is no need to worry about it. For the traditional lazyFPU restore case, change the cr0.TS bit for the host state during vm-exit to be always clear, and set the cr0.TS bit in __vmx_load_host_state() when the FPU (guest FPU or the host task's FPU) state is not active. This ensures that the host/guest FPU state is properly saved and restored during context-switch, and that interrupts (using irq_fpu_usable()) do not stomp on the active FPU state.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/r/1348164109.26695.338.camel@sbsiddha-desk.sc.intel.com
Cc: Avi Kivity <avi@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
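For context, the usage model described above can be sketched roughly as follows. This is an illustrative outline only, not the real KVM code: the demo_vcpu structure, the guest_fpu_loaded flag, the demo_* function names and the elided save/restore steps are placeholders. The point is simply that both calls run with preemption already disabled (the put path also being reachable from the preempt notifier), so they use the __kernel_fpu_begin/end() variants and never touch the preempt count themselves:

/*
 * Illustrative sketch only -- not the actual kvm_load/put_guest_fpu()
 * implementation. The structure and helpers below are placeholders.
 */
struct demo_vcpu {
	struct fpu guest_fpu;
	bool guest_fpu_loaded;
};

static void demo_load_guest_fpu(struct demo_vcpu *vcpu)
{
	/* Caller already runs with preemption disabled. */
	__kernel_fpu_begin();
	/* ... restore vcpu->guest_fpu into the hardware registers ... */
	vcpu->guest_fpu_loaded = true;
}

static void demo_put_guest_fpu(struct demo_vcpu *vcpu)
{
	/* Also reached from the preempt notifier when the thread is scheduled out. */
	if (!vcpu->guest_fpu_loaded)
		return;
	/* ... save the hardware registers back into vcpu->guest_fpu ... */
	__kernel_fpu_end();
	vcpu->guest_fpu_loaded = false;
}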
105 lines
2.6 KiB
C
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/hardirq.h>

struct pt_regs;
struct user_i387_struct;

extern int init_fpu(struct task_struct *child);
extern void fpu_finit(struct fpu *fpu);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern void math_state_restore(void);

extern bool irq_fpu_usable(void);

/*
 * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
 * and they don't touch the preempt state on their own.
 * If you enable preemption after __kernel_fpu_begin(), preempt notifier
 * should call the __kernel_fpu_end() to prevent the kernel/user FPU
 * state from getting corrupted. KVM for example uses this model.
 *
 * All other cases use kernel_fpu_begin/end() which disable preemption
 * during kernel FPU usage.
 */
extern void __kernel_fpu_begin(void);
extern void __kernel_fpu_end(void);

static inline void kernel_fpu_begin(void)
{
	WARN_ON_ONCE(!irq_fpu_usable());
	preempt_disable();
	__kernel_fpu_begin();
}

static inline void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions
 * get used from interrupt context as well. To prevent these kernel instructions
 * in interrupt context interacting wrongly with other user/kernel fpu usage, we
 * should use them only in the context of irq_ts_save/restore()
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int user_has_fpu(void)
{
	return current->thread.fpu.has_fpu;
}

extern void unlazy_fpu(struct task_struct *tsk);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */
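For comparison, a minimal sketch of the "all other cases" path the header comment mentions, where kernel_fpu_begin/end() manage preemption themselves. The function name, its scalar fallback and the elided SIMD body are invented for illustration; only irq_fpu_usable() and the begin/end helpers come from this header:

/* Illustrative only: a helper that may be called from interrupt context. */
static void demo_zero_buffer(void *dst, size_t len)
{
	if (!irq_fpu_usable()) {
		memset(dst, 0, len);	/* FPU not usable here: scalar fallback */
		return;
	}

	kernel_fpu_begin();		/* disables preemption for us */
	/* ... SSE/AVX stores over dst would go here ... */
	kernel_fpu_end();		/* restores preemption */
}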