62a679cb28
Currently __cpu_setup conditionally initializes the address authentication keys and enables them in SCTLR_EL1, doing so differently for the primary CPU and secondary CPUs, and skipping this work for CPUs returning from an idle state. For the latter case, cpu_do_resume restores the keys and SCTLR_EL1 value after the MMU has been enabled.

This flow is rather difficult to follow, so instead let's move the primary and secondary CPU initialization into their respective boot paths. By following the example of cpu_do_resume and doing so once the MMU is enabled, we can always initialize the keys from the values in thread_struct, and avoid the machinery necessary to pass the keys in secondary_data or open-coding initialization for the boot CPU.

This means we perform an additional RMW of SCTLR_EL1, but we already do this in the cpu_do_resume path, and for other features in cpufeature.c, so this isn't a major concern in a bringup path. Note that even while the enable bits are clear, the key registers are accessible.

As this now renders the argument to __cpu_setup redundant, let's also remove that entirely. Future extensions can follow a similar approach to initialize values that differ for primary/secondary CPUs.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200423101606.37601-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
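The THREAD_KEYS_USER/THREAD_KEYS_KERNEL and PTRAUTH_*_KEY_* constants used by the macros in this header are asm-offsets generated from the per-task key structures that the commit message refers to as "the values in thread_struct". A minimal C sketch of that layout, assuming the structure shape from asm/pointer_auth.h of this era (the field list here is an assumption for illustration, not copied from the file):

	/* Sketch only: approximate structures behind the asm-offsets constants. */
	struct ptrauth_key {
		unsigned long lo, hi;		/* written to the *KEYLO_EL1 / *KEYHI_EL1 registers */
	};

	struct ptrauth_keys_user {
		struct ptrauth_key apia, apib;	/* instruction address keys A and B */
		struct ptrauth_key apda, apdb;	/* data address keys A and B */
		struct ptrauth_key apga;	/* generic (PACGA) key */
	};

	struct ptrauth_keys_kernel {
		struct ptrauth_key apia;	/* only APIA is used for in-kernel PAC */
	};

With this layout, PTRAUTH_USER_KEY_APIA and friends are simply byte offsets of the corresponding struct ptrauth_key within thread_struct, which is why the macros below load each key with a single ldp of its lo/hi halves.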
99 lines · 2.8 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_POINTER_AUTH_H
#define __ASM_ASM_POINTER_AUTH_H

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_PTR_AUTH
/*
 * thread.keys_user.ap* as offset exceeds the #imm offset range
 * so use the base value of ldp as thread.keys_user and offset as
 * thread.keys_user.ap*.
 */
	.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
	mov	\tmp1, #THREAD_KEYS_USER
	add	\tmp1, \tsk, \tmp1
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.Laddr_auth_skip_\@
alternative_else_nop_endif
	ldp	\tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
	msr_s	SYS_APIAKEYLO_EL1, \tmp2
	msr_s	SYS_APIAKEYHI_EL1, \tmp3
	ldp	\tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
	msr_s	SYS_APIBKEYLO_EL1, \tmp2
	msr_s	SYS_APIBKEYHI_EL1, \tmp3
	ldp	\tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
	msr_s	SYS_APDAKEYLO_EL1, \tmp2
	msr_s	SYS_APDAKEYHI_EL1, \tmp3
	ldp	\tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
	msr_s	SYS_APDBKEYLO_EL1, \tmp2
	msr_s	SYS_APDBKEYHI_EL1, \tmp3
.Laddr_auth_skip_\@:
alternative_if ARM64_HAS_GENERIC_AUTH
	ldp	\tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
	msr_s	SYS_APGAKEYLO_EL1, \tmp2
	msr_s	SYS_APGAKEYHI_EL1, \tmp3
alternative_else_nop_endif
	.endm
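This macro takes a task_struct pointer plus three scratch registers and is intended for the return-to-userspace path, where the outgoing exception return synchronizes the key writes. A minimal usage sketch (the scratch-register choice here is an assumption, not the actual entry.S call site):

	/* Hypothetical call site: install the current task's user keys
	 * before returning to EL0; tsk holds the task_struct pointer. */
	ptrauth_keys_install_user tsk, x0, x1, x2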

	.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
	mov	\tmp1, #THREAD_KEYS_KERNEL
	add	\tmp1, \tsk, \tmp1
	ldp	\tmp2, \tmp3, [\tmp1, #PTRAUTH_KERNEL_KEY_APIA]
	msr_s	SYS_APIAKEYLO_EL1, \tmp2
	msr_s	SYS_APIAKEYHI_EL1, \tmp3
	.endm

	.macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
alternative_if ARM64_HAS_ADDRESS_AUTH
	__ptrauth_keys_install_kernel_nosync \tsk, \tmp1, \tmp2, \tmp3
alternative_else_nop_endif
	.endm

	.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
alternative_if ARM64_HAS_ADDRESS_AUTH
	__ptrauth_keys_install_kernel_nosync \tsk, \tmp1, \tmp2, \tmp3
	isb
alternative_else_nop_endif
	.endm
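The only difference between the two wrappers is the trailing isb: ptrauth_keys_install_kernel guarantees the new APIA key takes effect before the next instruction, while the _nosync variant leaves synchronization to the caller. A hedged sketch of why a caller might prefer the _nosync form (the surrounding instructions are illustrative, not taken from the kernel):

	/* Illustrative only: when the caller knows a context synchronization
	 * event will follow anyway, the _nosync variant avoids a redundant
	 * isb; here the eret itself is the synchronizing event. */
	ptrauth_keys_install_kernel_nosync tsk, x0, x1, x2
	/* ... other exit work ... */
	eret					/* new key visible after this CSE */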

	.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
	mrs	\tmp1, id_aa64isar1_el1
	ubfx	\tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
	cbz	\tmp1, .Lno_addr_auth\@
	mov_q	\tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
			SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	mrs	\tmp2, sctlr_el1
	orr	\tmp2, \tmp2, \tmp1
	msr	sctlr_el1, \tmp2
	__ptrauth_keys_install_kernel_nosync \tsk, \tmp1, \tmp2, \tmp3
	isb
.Lno_addr_auth\@:
	.endm

	.macro ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	b	.Lno_addr_auth\@
alternative_else_nop_endif
	__ptrauth_keys_init_cpu \tsk, \tmp1, \tmp2, \tmp3
.Lno_addr_auth\@:
	.endm
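Per the commit message above, this is the macro the primary and secondary boot paths invoke once the MMU is enabled, so the keys can be read straight from the CPU's initial task's thread_struct while SCTLR_EL1 is read-modify-written to set the EnIA/EnIB/EnDA/EnDB bits. A minimal sketch of such a call site (the register choice and the surrounding fragment are assumptions, not the actual head.S code):

	/* Hypothetical boot-path fragment, after the MMU has been enabled. */
	adr_l	x0, init_task			/* boot CPU's initial task */
	ptrauth_keys_init_cpu x0, x1, x2, x3	/* enable address auth and set APIA */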

#else /* CONFIG_ARM64_PTR_AUTH */

	.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
	.endm

	.macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
	.endm

	.macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
	.endm

#endif /* CONFIG_ARM64_PTR_AUTH */

#endif /* __ASM_ASM_POINTER_AUTH_H */