mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2025-01-05 21:35:04 +08:00
2cb3427642
This change provides a simpler implementation of mte_get_mem_tag(), mte_get_random_tag(), and mte_set_mem_tag_range(). Simplifications include removing system_supports_mte() checks as these functions are only called from KASAN runtime that had already checked system_supports_mte(). Besides that, size and address alignment checks are removed from mte_set_mem_tag_range(), as KASAN now does those. This change also moves these functions into the asm/mte-kasan.h header and implements mte_set_mem_tag_range() via inline assembly to avoid unnecessary function calls. [vincenzo.frascino@arm.com: fix warning in mte_get_random_tag()] Link: https://lkml.kernel.org/r/20210211152208.23811-1-vincenzo.frascino@arm.com Link: https://lkml.kernel.org/r/a26121b294fdf76e369cb7a74351d1c03a908930.1612546384.git.andreyknvl@google.com Co-developed-by: Vincenzo Frascino <vincenzo.frascino@arm.com> Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com> Signed-off-by: Andrey Konovalov <andreyknvl@google.com> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Cc: Alexander Potapenko <glider@google.com> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Cc: Branislav Rankov <Branislav.Rankov@arm.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Evgenii Stepanov <eugenis@google.com> Cc: Kevin Brodsky <kevin.brodsky@arm.com> Cc: Marco Elver <elver@google.com> Cc: Peter Collingbourne <pcc@google.com> Cc: Will Deacon <will.deacon@arm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
49 lines
1.5 KiB
C
49 lines
1.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/mte-kasan.h>
#include <asm/pgtable-types.h>

/*
 * Arch hooks used by the generic KASAN code to set, clear, and read the
 * pointer tag stored in the top byte of a tagged address; implemented via
 * the __tag_* helpers from asm/memory.h.
 */
#define arch_kasan_set_tag(addr, tag)	__tag_set(addr, tag)
#define arch_kasan_reset_tag(addr)	__tag_reset(addr)
#define arch_kasan_get_tag(addr)	__tag_get(addr)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_init(void);

/*
 * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
 * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
 * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
 *
 * KASAN_SHADOW_OFFSET:
 * This value is used to map an address to the corresponding shadow
 * address by the following formula:
 * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
 * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
 * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
 * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
 *	(1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
 */
/* Shadow region start for a given number of VA bits (runtime-resolved). */
#define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
#define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)

void kasan_copy_shadow(pgd_t *pgdir);
asmlinkage void kasan_early_init(void);

#else
/* KASAN disabled: provide empty stubs so callers need no ifdefs. */
static inline void kasan_init(void) { }
static inline void kasan_copy_shadow(pgd_t *pgdir) { }
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_KASAN_H */