mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-05 01:54:09 +08:00
c7f631cb07
Quoting Linus: I do think that it would be a good idea to very expressly document the fact that it's not that the user access itself is unsafe. I do agree that things like "get_user()" want to be protected, but not because of any direct bugs or problems with get_user() and friends, but simply because get_user() is an excellent source of a pointer that is obviously controlled from a potentially attacking user space. So it's a prime candidate for then finding _subsequent_ accesses that can then be used to perturb the cache. Unlike the __get_user() case get_user() includes the address limit check near the pointer de-reference. With that locality the speculation can be mitigated with pointer narrowing rather than a barrier, i.e. array_index_nospec(). Where the narrowing is performed by: cmp %limit, %ptr sbb %mask, %mask and %mask, %ptr With respect to speculation the value of %ptr is either less than %limit or NULL. Co-developed-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: linux-arch@vger.kernel.org Cc: Kees Cook <keescook@chromium.org> Cc: kernel-hardening@lists.openwall.com Cc: gregkh@linuxfoundation.org Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Andy Lutomirski <luto@kernel.org> Cc: torvalds@linux-foundation.org Cc: alan@linux.intel.com Link: https://lkml.kernel.org/r/151727417469.33451.11804043010080838495.stgit@dwillia2-desk3.amr.corp.intel.com
144 lines
3.0 KiB
ArmAsm
144 lines
3.0 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * __get_user functions.
 *
 * (C) Copyright 1998 Linus Torvalds
 * (C) Copyright 2005 Andi Kleen
 * (C) Copyright 2008 Glauber Costa
 *
 * These functions have a non-standard call interface
 * to make them more efficient, especially as they
 * return an error value in addition to the "real"
 * return value.
 */

/*
 * __get_user_X
 *
 * Inputs:	%[r|e]ax contains the address.
 *
 * Outputs:	%[r|e]ax is error code (0 or -EFAULT)
 *		%[r|e]dx contains zero-extended value
 *		%ecx contains the high half for 32-bit __get_user_8
 *
 *
 * These functions should not modify any other registers,
 * as they get called from within inline assembly.
 */
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/page_types.h>
|
|
#include <asm/errno.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/thread_info.h>
|
|
#include <asm/asm.h>
|
|
#include <asm/smap.h>
|
|
#include <asm/export.h>
|
|
|
|
.text
|
|
ENTRY(__get_user_1)
|
|
mov PER_CPU_VAR(current_task), %_ASM_DX
|
|
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
|
|
and %_ASM_DX, %_ASM_AX
|
|
ASM_STAC
|
|
1: movzbl (%_ASM_AX),%edx
|
|
xor %eax,%eax
|
|
ASM_CLAC
|
|
ret
|
|
ENDPROC(__get_user_1)
|
|
EXPORT_SYMBOL(__get_user_1)
|
|
|
|
ENTRY(__get_user_2)
|
|
add $1,%_ASM_AX
|
|
jc bad_get_user
|
|
mov PER_CPU_VAR(current_task), %_ASM_DX
|
|
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
|
|
and %_ASM_DX, %_ASM_AX
|
|
ASM_STAC
|
|
2: movzwl -1(%_ASM_AX),%edx
|
|
xor %eax,%eax
|
|
ASM_CLAC
|
|
ret
|
|
ENDPROC(__get_user_2)
|
|
EXPORT_SYMBOL(__get_user_2)
|
|
|
|
ENTRY(__get_user_4)
|
|
add $3,%_ASM_AX
|
|
jc bad_get_user
|
|
mov PER_CPU_VAR(current_task), %_ASM_DX
|
|
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
|
|
and %_ASM_DX, %_ASM_AX
|
|
ASM_STAC
|
|
3: movl -3(%_ASM_AX),%edx
|
|
xor %eax,%eax
|
|
ASM_CLAC
|
|
ret
|
|
ENDPROC(__get_user_4)
|
|
EXPORT_SYMBOL(__get_user_4)
|
|
|
|
ENTRY(__get_user_8)
|
|
#ifdef CONFIG_X86_64
|
|
add $7,%_ASM_AX
|
|
jc bad_get_user
|
|
mov PER_CPU_VAR(current_task), %_ASM_DX
|
|
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user
|
|
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
|
|
and %_ASM_DX, %_ASM_AX
|
|
ASM_STAC
|
|
4: movq -7(%_ASM_AX),%rdx
|
|
xor %eax,%eax
|
|
ASM_CLAC
|
|
ret
|
|
#else
|
|
add $7,%_ASM_AX
|
|
jc bad_get_user_8
|
|
mov PER_CPU_VAR(current_task), %_ASM_DX
|
|
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
|
|
jae bad_get_user_8
|
|
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
|
|
and %_ASM_DX, %_ASM_AX
|
|
ASM_STAC
|
|
4: movl -7(%_ASM_AX),%edx
|
|
5: movl -3(%_ASM_AX),%ecx
|
|
xor %eax,%eax
|
|
ASM_CLAC
|
|
ret
|
|
#endif
|
|
ENDPROC(__get_user_8)
|
|
EXPORT_SYMBOL(__get_user_8)
|
|
|
|
|
|
bad_get_user:
|
|
xor %edx,%edx
|
|
mov $(-EFAULT),%_ASM_AX
|
|
ASM_CLAC
|
|
ret
|
|
END(bad_get_user)
|
|
|
|
#ifdef CONFIG_X86_32
|
|
bad_get_user_8:
|
|
xor %edx,%edx
|
|
xor %ecx,%ecx
|
|
mov $(-EFAULT),%_ASM_AX
|
|
ASM_CLAC
|
|
ret
|
|
END(bad_get_user_8)
|
|
#endif
|
|
|
|
_ASM_EXTABLE(1b,bad_get_user)
|
|
_ASM_EXTABLE(2b,bad_get_user)
|
|
_ASM_EXTABLE(3b,bad_get_user)
|
|
#ifdef CONFIG_X86_64
|
|
_ASM_EXTABLE(4b,bad_get_user)
|
|
#else
|
|
_ASM_EXTABLE(4b,bad_get_user_8)
|
|
_ASM_EXTABLE(5b,bad_get_user_8)
|
|
#endif
|