x86/fpu/signal: Change return type of __fpu_restore_sig() to boolean

Now that fpu__restore_sig() returns a boolean, get rid of the individual
error codes in __fpu_restore_sig() as well.
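
A minimal before/after sketch of the caller side (condensed from the last
hunk of the patch below) shows what this buys:

	/* Before: 0 or -EFAULT/-EINVAL, so a boolean caller had to invert it */
	success = !__fpu_restore_sig(buf, buf_fx, ia32_fxstate);

	/* After: success is reported directly as a boolean */
	success = __fpu_restore_sig(buf, buf_fx, ia32_fxstate);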

Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210908132525.966197097@linutronix.de
Author:    Thomas Gleixner, 2021-09-08 15:29:38 +02:00
Committer: Borislav Petkov
Commit:    1193f408cd (parent: f3305be5fe)


@@ -309,8 +309,8 @@ retry:
 	return 0;
 }
 
-static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
-			     bool ia32_fxstate)
+static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx,
+			      bool ia32_fxstate)
 {
 	int state_size = fpu_kernel_xstate_size;
 	struct task_struct *tsk = current;
@@ -318,14 +318,14 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	struct user_i387_ia32_struct env;
 	u64 user_xfeatures = 0;
 	bool fx_only = false;
-	int ret;
+	bool success;
 
 	if (use_xsave()) {
 		struct _fpx_sw_bytes fx_sw_user;
 
-		ret = check_xstate_in_sigframe(buf_fx, &fx_sw_user);
-		if (unlikely(ret))
-			return ret;
+		if (check_xstate_in_sigframe(buf_fx, &fx_sw_user))
+			return false;
 
 		fx_only = !fx_sw_user.magic1;
 		state_size = fx_sw_user.xstate_size;
@@ -341,8 +341,8 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		 * faults. If it does, fall back to the slow path below, going
 		 * through the kernel buffer with the enabled pagefault handler.
 		 */
-		return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
-						state_size);
+		return !restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+						 state_size);
 	}
 
 	/*
@@ -350,9 +350,8 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	 * to be ignored for histerical raisins. The legacy state is folded
 	 * in once the larger state has been copied.
 	 */
-	ret = __copy_from_user(&env, buf, sizeof(env));
-	if (ret)
-		return ret;
+	if (__copy_from_user(&env, buf, sizeof(env)))
+		return false;
 
 	/*
 	 * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
@@ -379,17 +378,16 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	fpregs_unlock();
 
 	if (use_xsave() && !fx_only) {
-		ret = copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx);
-		if (ret)
-			return ret;
+		if (copy_sigframe_from_user_to_xstate(&fpu->state.xsave, buf_fx))
+			return false;
 	} else {
 		if (__copy_from_user(&fpu->state.fxsave, buf_fx,
 				     sizeof(fpu->state.fxsave)))
-			return -EFAULT;
+			return false;
 
 		/* Reject invalid MXCSR values. */
 		if (fpu->state.fxsave.mxcsr & ~mxcsr_feature_mask)
-			return -EINVAL;
+			return false;
 
 		/* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */
 		if (use_xsave())
@@ -413,17 +411,18 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		u64 mask = user_xfeatures | xfeatures_mask_supervisor();
 
 		fpu->state.xsave.header.xfeatures &= mask;
-		ret = os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all) ? -EINVAL : 0;
+		success = !os_xrstor_safe(&fpu->state.xsave, xfeatures_mask_all);
 	} else {
-		ret = fxrstor_safe(&fpu->state.fxsave);
+		success = !fxrstor_safe(&fpu->state.fxsave);
 	}
 
-	if (likely(!ret))
+	if (likely(success))
 		fpregs_mark_activate();
 
 	fpregs_unlock();
-	return ret;
+	return success;
 }
 
 static inline int xstate_sigframe_size(void)
 {
 	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
@@ -467,7 +466,7 @@ bool fpu__restore_sig(void __user *buf, int ia32_frame)
 					     sizeof(struct user_i387_ia32_struct),
 					     NULL, buf);
 	} else {
-		success = !__fpu_restore_sig(buf, buf_fx, ia32_fxstate);
+		success = __fpu_restore_sig(buf, buf_fx, ia32_fxstate);
 	}
 
 out: