
arm64/uaccess: Enable hardened usercopy

Enables CONFIG_HARDENED_USERCOPY checks on arm64. As done by KASAN in -next,
renames the low-level functions to __arch_copy_*_user() so a static inline
can do additional work before the copy.

Signed-off-by: Kees Cook <keescook@chromium.org>
Kees Cook 2016-06-23 15:59:42 -07:00
parent dfd45b6103
commit faf5b63e29
5 changed files with 29 additions and 13 deletions

arch/arm64/Kconfig

@@ -51,6 +51,7 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
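Note that this select only advertises arch support. The checks themselves come with the generic HARDENED_USERCOPY option from the hardened-usercopy series this patch builds on; assuming that series is applied, the feature is then switched on in the kernel configuration:

CONFIG_HARDENED_USERCOPY=y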

arch/arm64/include/asm/uaccess.h

@@ -256,24 +256,39 @@ do { \
 		-EFAULT; \
 })
 
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	check_object_size(from, n, true);
+	return __arch_copy_to_user(to, from, n);
+}
+
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
-	else /* security hole - plug it */
+	if (access_ok(VERIFY_READ, from, n)) {
+		check_object_size(to, n, false);
+		n = __arch_copy_from_user(to, from, n);
+	} else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
 }
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		check_object_size(from, n, true);
+		n = __arch_copy_to_user(to, from, n);
+	}
 	return n;
 }
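To make the pattern in this uaccess.h hunk easier to see outside the kernel tree, here is a minimal user-space sketch of the same idea: a static inline wrapper validates the destination before delegating to a renamed low-level copy routine. demo_check_object_size(), __arch_copy_demo(), and demo_copy_from() are hypothetical stand-ins, not kernel APIs, and the sketch passes the object's size explicitly, whereas the real check_object_size() derives the bounds from the slab allocator or the stack.

#include <assert.h>
#include <string.h>

/* Stand-in for check_object_size(): refuse a copy that would overrun
 * the destination object. (The kernel looks the bounds up itself and
 * reports/aborts on a violation; the assert models that.) */
static void demo_check_object_size(const void *ptr, size_t obj_size, size_t n)
{
	(void)ptr;
	assert(n <= obj_size);
}

/* Stand-in for the renamed low-level routine, __arch_copy_from_user(). */
static size_t __arch_copy_demo(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;		/* bytes left uncopied */
}

/* The hardened wrapper: validate first, then delegate to the arch copy. */
static inline size_t demo_copy_from(void *to, size_t to_size,
				    const void *from, size_t n)
{
	demo_check_object_size(to, to_size, n);
	return __arch_copy_demo(to, from, n);
}

int main(void)
{
	char src[8] = "hello";
	char dst[8];

	return (int)demo_copy_from(dst, sizeof(dst), src, sizeof(src));
}

Call sites keep working unchanged: copy_from_user() keeps its signature, and because the check runs before any bytes move, an overlong length is caught even when access_ok() passes.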

arch/arm64/kernel/arm64ksyms.c

@@ -34,8 +34,8 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
 /* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_in_user);

arch/arm64/lib/copy_from_user.S

@@ -66,7 +66,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0		// Nothing to copy
 	ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2

arch/arm64/lib/copy_to_user.S

@@ -65,7 +65,7 @@
 	.endm
 
 end	.req	x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2