2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-29 15:43:59 +08:00

x86/fpu/xstate: Update sanitize_restored_xstate() for supervisor xstates

The function sanitize_restored_xstate() sanitizes user xstates of an XSAVE
buffer by clearing bits not in the input 'xfeatures' from the buffer's
header->xfeatures, effectively resetting those features back to the init
state.

When supervisor xstates are introduced, it is necessary to make sure only
user xstates are sanitized.  Ensure supervisor bits in header->xfeatures
stay set and supervisor states are not modified.

To make names clear, also:

- Rename the function to sanitize_restored_user_xstate().
- Rename input parameter 'xfeatures' to 'user_xfeatures'.
- In __fpu__restore_sig(), rename 'xfeatures' to 'user_xfeatures'.

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20200512145444.15483-7-yu-cheng.yu@intel.com
This commit is contained in:
Yu-cheng Yu 2020-05-12 07:54:40 -07:00 committed by Borislav Petkov
parent b860eb8dce
commit 5d6b6a6f9b

View File

@@ -211,9 +211,9 @@ retry:
} }
static inline void static inline void
sanitize_restored_xstate(union fpregs_state *state, sanitize_restored_user_xstate(union fpregs_state *state,
struct user_i387_ia32_struct *ia32_env, struct user_i387_ia32_struct *ia32_env,
u64 xfeatures, int fx_only) u64 user_xfeatures, int fx_only)
{ {
struct xregs_state *xsave = &state->xsave; struct xregs_state *xsave = &state->xsave;
struct xstate_header *header = &xsave->header; struct xstate_header *header = &xsave->header;
@@ -226,13 +226,22 @@ sanitize_restored_xstate(union fpregs_state *state,
*/ */
/* /*
* Init the state that is not present in the memory * 'user_xfeatures' might have bits clear which are
* layout and not enabled by the OS. * set in header->xfeatures. This represents features that
* were in init state prior to a signal delivery, and need
* to be reset back to the init state. Clear any user
* feature bits which are set in the kernel buffer to get
* them back to the init state.
*
* Supervisor state is unchanged by input from userspace.
* Ensure supervisor state bits stay set and supervisor
* state is not modified.
*/ */
if (fx_only) if (fx_only)
header->xfeatures = XFEATURE_MASK_FPSSE; header->xfeatures = XFEATURE_MASK_FPSSE;
else else
header->xfeatures &= xfeatures; header->xfeatures &= user_xfeatures |
xfeatures_mask_supervisor();
} }
if (use_fxsr()) { if (use_fxsr()) {
@@ -281,7 +290,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu; struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env; struct user_i387_ia32_struct env;
u64 xfeatures = 0; u64 user_xfeatures = 0;
int fx_only = 0; int fx_only = 0;
int ret = 0; int ret = 0;
@@ -314,7 +323,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
trace_x86_fpu_xstate_check_failed(fpu); trace_x86_fpu_xstate_check_failed(fpu);
} else { } else {
state_size = fx_sw_user.xstate_size; state_size = fx_sw_user.xstate_size;
xfeatures = fx_sw_user.xfeatures; user_xfeatures = fx_sw_user.xfeatures;
} }
} }
@@ -349,7 +358,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
*/ */
fpregs_lock(); fpregs_lock();
pagefault_disable(); pagefault_disable();
ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only); ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
pagefault_enable(); pagefault_enable();
if (!ret) { if (!ret) {
fpregs_mark_activate(); fpregs_mark_activate();
@@ -362,7 +371,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (use_xsave() && !fx_only) { if (use_xsave() && !fx_only) {
u64 init_bv = xfeatures_mask_user() & ~xfeatures; u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
if (using_compacted_format()) { if (using_compacted_format()) {
ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx); ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
@@ -375,12 +384,13 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (ret) if (ret)
goto err_out; goto err_out;
sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only); sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
fx_only);
fpregs_lock(); fpregs_lock();
if (unlikely(init_bv)) if (unlikely(init_bv))
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv); copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures); ret = copy_kernel_to_xregs_err(&fpu->state.xsave, user_xfeatures);
} else if (use_fxsr()) { } else if (use_fxsr()) {
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size); ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
@@ -389,7 +399,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
goto err_out; goto err_out;
} }
sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only); sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
fx_only);
fpregs_lock(); fpregs_lock();
if (use_xsave()) { if (use_xsave()) {