ARM: 9014/2: Replace string mem* functions for KASan
Functions like memset()/memmove()/memcpy() do a lot of memory accesses. If a bad pointer is passed to one of these functions, it is important to catch this. Compiler instrumentation cannot do this since these functions are written in assembly.

KASan replaces these memory functions with instrumented variants. The original functions are declared as weak symbols so that the strong definitions in mm/kasan/kasan.c can replace them. The original functions have aliases with a '__' prefix in their name, so we can call the non-instrumented variant if needed.

We must use __memcpy()/__memset() in place of memcpy()/memset() when we copy .data to RAM and when we clear .bss, because kasan_early_init cannot be called before the initialization of .data and .bss.

For the kernel compression and EFI libstub's custom string libraries we need a special quirk: even if these are built without KASan enabled, they rely on the global headers for their custom string libraries, which means that e.g. memcpy() will be defined to __memcpy() and we get link failures. Since these implementations are written in C rather than assembly, we use e.g. __alias(memcpy) to redirect any users back to the local implementation.

Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: kasan-dev@googlegroups.com
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Ard Biesheuvel <ardb@kernel.org> # QEMU/KVM/mach-virt/LPAE/8G
Tested-by: Florian Fainelli <f.fainelli@gmail.com> # Brahma SoCs
Tested-by: Ahmad Fatoum <a.fatoum@pengutronix.de> # i.MX6Q
Reported-by: Russell King - ARM Linux <rmk+kernel@armlinux.org.uk>
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Signed-off-by: Abbott Liu <liuwenliang@huawei.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
parent d5d44e7e35
commit d6d51a96c7
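To make the weak-symbol/alias mechanism described above concrete, here is a minimal user-space sketch, not the kernel's code: the names demo_memcpy and __demo_memcpy and the file names are hypothetical, and the "check" is just a printf standing in for KASan's shadow-memory validation. Three small files are shown in one block, separated by comments.

/* lib_memcpy.c -- stands in for arch/arm/lib/memcpy.S: the plain copy
 * routine is weak, and __demo_memcpy is an alias to the same code so the
 * uninstrumented variant always stays reachable under its '__' name. */
#include <stddef.h>

__attribute__((weak))
void *demo_memcpy(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;

	while (n--)
		*d++ = *s++;
	return dest;
}

void *__demo_memcpy(void *dest, const void *src, size_t n)
	__attribute__((alias("demo_memcpy")));

/* kasan_memcpy.c -- stands in for the instrumented replacement: a strong
 * definition that wins over the weak one at link time, performs its checks
 * (here just a message), then falls through to the raw copy. */
#include <stddef.h>
#include <stdio.h>

void *__demo_memcpy(void *dest, const void *src, size_t n);

void *demo_memcpy(void *dest, const void *src, size_t n)
{
	printf("checked copy of %zu bytes\n", n);  /* real KASan validates shadow memory here */
	return __demo_memcpy(dest, src, n);
}

/* main.c -- build: cc -c lib_memcpy.c kasan_memcpy.c main.c && cc *.o -o demo */
#include <stddef.h>
#include <stdio.h>

void *demo_memcpy(void *dest, const void *src, size_t n);
void *__demo_memcpy(void *dest, const void *src, size_t n);

int main(void)
{
	char buf[16];

	demo_memcpy(buf, "checked", 8);   /* goes through the instrumented wrapper          */
	__demo_memcpy(buf, "raw", 4);     /* bypasses it, like __memcpy/__memset in head-common.S */
	printf("%s\n", buf);
	return 0;
}

The linker picks the strong, instrumented demo_memcpy over the weak library copy, while direct callers of __demo_memcpy still reach the uninstrumented code, which is exactly what the .weak/ENTRY(__mem*) changes in the diff below arrange for the ARM assembly routines.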
--- a/arch/arm/boot/compressed/string.c
+++ b/arch/arm/boot/compressed/string.c
@@ -7,6 +7,25 @@
 
 #include <linux/string.h>
 
+/*
+ * The decompressor is built without KASan but uses the same redirects as the
+ * rest of the kernel when CONFIG_KASAN is enabled, defining e.g. memcpy()
+ * to __memcpy() but since we are not linking with the main kernel string
+ * library in the decompressor, that will lead to link failures.
+ *
+ * Undefine KASan's versions, define the wrapped functions and alias them to
+ * the right names so that when e.g. __memcpy() appear in the code, it will
+ * still be linked to this local version of memcpy().
+ */
+#ifdef CONFIG_KASAN
+#undef memcpy
+#undef memmove
+#undef memset
+void *__memcpy(void *__dest, __const void *__src, size_t __n) __alias(memcpy);
+void *__memmove(void *__dest, __const void *__src, size_t count) __alias(memmove);
+void *__memset(void *s, int c, size_t count) __alias(memset);
+#endif
+
 void *memcpy(void *__dest, __const void *__src, size_t __n)
 {
 	int i = 0;
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -5,6 +5,9 @@
 /*
  * We don't do inline string functions, since the
  * optimised inline asm versions are not small.
+ *
+ * The __underscore versions of some functions are for KASan to be able
+ * to replace them with instrumented versions.
  */
 
 #define __HAVE_ARCH_STRRCHR
@@ -15,15 +18,18 @@ extern char * strchr(const char * s, int c);
 
 #define __HAVE_ARCH_MEMCPY
 extern void * memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *dest, const void *src, __kernel_size_t n);
 
 #define __HAVE_ARCH_MEMMOVE
 extern void * memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *dest, const void *src, __kernel_size_t n);
 
 #define __HAVE_ARCH_MEMCHR
 extern void * memchr(const void *, int, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMSET
 extern void * memset(void *, int, __kernel_size_t);
+extern void *__memset(void *s, int c, __kernel_size_t n);
 
 #define __HAVE_ARCH_MEMSET32
 extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
@@ -39,4 +45,24 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
 	return __memset64(p, v, n * 8, v >> 32);
 }
 
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * must use non-instrumented versions of the mem*
+ * functions named __memcpy() etc. All such kernel code has
+ * been tagged with KASAN_SANITIZE_file.o = n, which means
+ * that the address sanitization argument isn't passed to the
+ * compiler, and __SANITIZE_ADDRESS__ is not set. As a result
+ * these defines kick in.
+ */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+
 #endif
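The redirect added to asm/string.h above means a file excluded from instrumentation keeps writing memcpy() in its source while the preprocessor quietly turns that into __memcpy(). Below is a small user-space sketch of the same pattern, with hypothetical names only (demo_memcpy, __demo_memcpy, DEMO_KASAN standing in for CONFIG_KASAN); it is not the kernel header.

/* redirect_sketch.c -- build the "uninstrumented object" flavour with
 *   cc -DDEMO_KASAN redirect_sketch.c -o sketch
 * (DEMO_KASAN=y but no sanitizer flag, so __SANITIZE_ADDRESS__ is unset,
 * mirroring CONFIG_KASAN=y plus KASAN_SANITIZE_foo.o := n). */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

void *demo_memcpy(void *dest, const void *src, size_t n)
{
	puts("instrumented demo_memcpy (would check shadow memory)");
	return memcpy(dest, src, n);
}

void *__demo_memcpy(void *dest, const void *src, size_t n)
{
	puts("raw __demo_memcpy (no checks)");
	return memcpy(dest, src, n);
}

/* The redirect: callers compiled without the sanitizer silently get the
 * raw variant, just like memcpy -> __memcpy in asm/string.h. */
#if defined(DEMO_KASAN) && !defined(__SANITIZE_ADDRESS__)
#define demo_memcpy(dst, src, len) __demo_memcpy(dst, src, len)
#endif

int main(void)
{
	char buf[8];

	/* The source always says demo_memcpy(); which function runs depends
	 * only on the build flags, not on the caller's code. */
	demo_memcpy(buf, "hi", 3);
	printf("%s\n", buf);
	return 0;
}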
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -95,7 +95,7 @@ __mmap_switched:
 THUMB(	ldmia	r4!, {r0, r1, r2, r3} )
 THUMB(	mov	sp, r3 )
 	sub	r2, r2, r1
-	bl	memcpy				@ copy .data to RAM
+	bl	__memcpy			@ copy .data to RAM
 #endif
 
    ARM(	ldmia	r4!, {r0, r1, sp} )
@@ -103,7 +103,7 @@ __mmap_switched:
 THUMB(	mov	sp, r3 )
 	sub	r2, r1, r0
 	mov	r1, #0
-	bl	memset				@ clear .bss
+	bl	__memset			@ clear .bss
 
 	ldmia	r4, {r0, r1, r2, r3}
 	str	r9, [r0]			@ Save processor ID
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -58,6 +58,8 @@
 
 /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
 
+.weak memcpy
+ENTRY(__memcpy)
 ENTRY(mmiocpy)
 ENTRY(memcpy)
 
@@ -65,3 +67,4 @@ ENTRY(memcpy)
 
 ENDPROC(memcpy)
 ENDPROC(mmiocpy)
+ENDPROC(__memcpy)
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -24,12 +24,14 @@
  * occurring in the opposite direction.
  */
 
+.weak memmove
+ENTRY(__memmove)
 ENTRY(memmove)
 	UNWIND(	.fnstart	)
 
 		subs	ip, r0, r1
 		cmphi	r2, ip
-		bls	memcpy
+		bls	__memcpy
 
 		stmfd	sp!, {r0, r4, lr}
 	UNWIND(	.fnend	)
@@ -222,3 +224,4 @@ ENTRY(memmove)
 18:		backward_copy_shift	push=24	pull=8
 
 ENDPROC(memmove)
+ENDPROC(__memmove)
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -13,6 +13,8 @@
 	.text
 	.align	5
 
+.weak memset
+ENTRY(__memset)
 ENTRY(mmioset)
 ENTRY(memset)
 UNWIND( .fnstart )
@@ -132,6 +134,7 @@ UNWIND( .fnstart )
 UNWIND( .fnend   )
 ENDPROC(memset)
 ENDPROC(mmioset)
+ENDPROC(__memset)
 
 ENTRY(__memset32)
 UNWIND( .fnstart         )