mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-02 16:44:10 +08:00
e16aeb0726
Some Armv8.3 Pointer Authentication enhancements have been introduced which are mandatory for Armv8.6 and optional for Armv8.3. These features are: * ARMv8.3-PAuth2 - An enhanced PAC generation logic is added which hardens finding the correct PAC value of the authenticated pointer. * ARMv8.3-FPAC - A fault is now generated when a ptrauth authentication instruction fails to authenticate the PAC present in the address. This is different from the earlier case, where such failures just added an error code in the top byte and waited for a subsequent load/store to abort. The ptrauth instructions which may cause this fault are autiasp, retaa etc. The above features are now represented by additional configurations for the Address Authentication cpufeature and a new ESR exception class. The userspace fault received in the kernel due to ARMv8.3-FPAC is treated as an illegal instruction and hence signal SIGILL is injected with ILL_ILLOPN as the signal code. Note that this is different from earlier ARMv8.3 ptrauth, where signal SIGSEGV is issued due to pointer authentication failures. An in-kernel PAC fault causes the kernel to crash. Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com> Reviewed-by: Dave Martin <Dave.Martin@arm.com> Link: https://lore.kernel.org/r/20200914083656.21428-4-amit.kachhap@arm.com Signed-off-by: Will Deacon <will@kernel.org>
365 lines
8.0 KiB
C
365 lines
8.0 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Exception handling code
|
|
*
|
|
* Copyright (C) 2019 ARM Ltd.
|
|
*/
|
|
|
|
#include <linux/context_tracking.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/thread_info.h>
|
|
|
|
#include <asm/cpufeature.h>
|
|
#include <asm/daifflags.h>
|
|
#include <asm/esr.h>
|
|
#include <asm/exception.h>
|
|
#include <asm/kprobes.h>
|
|
#include <asm/mmu.h>
|
|
#include <asm/sysreg.h>
|
|
|
|
/*
 * Synchronous data/instruction abort taken from EL1 (kernel mode).
 *
 * FAR_EL1 is read before local_daif_inherit() re-enables exceptions, so
 * the faulting address cannot be clobbered by a nested exception.
 * NOTE(review): presumed rationale for the ordering — confirm against
 * the entry.S callers.
 */
static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/* Run with the same DAIF mask state as the interrupted context. */
	local_daif_inherit(regs);
	/* Remove any pointer tag bits before handing off to the fault code. */
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el1_abort);
|
/*
 * PC alignment fault taken from EL1.
 *
 * The fault address is captured from FAR_EL1 before exceptions are
 * unmasked, matching the ordering used by el1_abort().
 */
static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el1_pc);
|
/*
 * Undefined instruction (or unallocated system register access) taken
 * from EL1. No fault address is involved, so only the DAIF state needs
 * to be inherited before calling the generic handler.
 */
static void notrace el1_undef(struct pt_regs *regs)
{
	local_daif_inherit(regs);
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(el1_undef);
|
/*
 * Fallback for an EL1 synchronous exception class that has no dedicated
 * handler; reports it via bad_mode() (reason 0) with the raw ESR value.
 */
static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
{
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
}
NOKPROBE_SYMBOL(el1_inv);
|
/*
 * Debug exception (breakpoint/watchpoint/single-step/BRK) taken from
 * EL1. Unlike the other EL1 handlers, interrupts remain masked for the
 * duration of do_debug_exception().
 */
static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Read FAR_EL1 while exceptions are still masked. */
	unsigned long far = read_sysreg(far_el1);

	/*
	 * The CPU masked interrupts, and we are leaving them masked during
	 * do_debug_exception(). Update PMR as if we had called
	 * local_daif_mask().
	 */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	do_debug_exception(far, esr, regs);
}
NOKPROBE_SYMBOL(el1_dbg);
|
/*
 * ARMv8.3-FPAC pointer authentication failure taken from EL1: an
 * in-kernel auth instruction (e.g. autiasp, retaa) failed to
 * authenticate its PAC. Forwarded to do_ptrauth_fault().
 */
static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
}
NOKPROBE_SYMBOL(el1_fpac);
|
/*
 * Top-level dispatcher for synchronous exceptions taken from EL1,
 * entered from the assembly vectors. Decodes the exception class from
 * ESR_EL1 and routes to the matching el1_* helper; anything
 * unrecognised falls through to el1_inv().
 */
asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el1_sync_handler);
|
/*
 * Data abort taken from EL0 (userspace).
 *
 * Ordering: FAR_EL1 is read first, context tracking exits usermode
 * while IRQs are still masked (user_exit_irqoff), and only then is the
 * process-context DAIF state restored.
 */
static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	/* Remove any pointer tag bits before the fault address is used. */
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_da);
|
/*
 * Instruction abort taken from EL0. A user branch to a kernel address
 * triggers branch-predictor hardening before IRQs are re-enabled.
 */
static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_ia);
|
/*
 * FP/ASIMD access trap from EL0 (e.g. first FP use with lazy state
 * handling — NOTE(review): exact trap cause is decided by the caller's
 * EC value; this just exits usermode context and dispatches).
 */
static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_acc);
|
/*
 * SVE access trap from EL0: exit usermode context with IRQs masked,
 * restore process-context DAIF, then hand off to do_sve_acc().
 */
static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}
NOKPROBE_SYMBOL(el0_sve_acc);
|
/*
 * FP/ASIMD exception from EL0 (trapped floating-point exception):
 * standard EL0 prologue, then dispatch to do_fpsimd_exc().
 */
static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}
NOKPROBE_SYMBOL(el0_fpsimd_exc);
|
/*
 * Trapped system-register access or WFx instruction from EL0 (routed
 * here for both ESR_ELx_EC_SYS64 and ESR_ELx_EC_WFx by the dispatcher).
 */
static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}
NOKPROBE_SYMBOL(el0_sys);
|
/*
 * PC alignment fault from EL0. If the faulting PC is a kernel address,
 * apply BP hardening before IRQs are re-enabled, mirroring el0_ia().
 */
static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/* Kernel-address PC from userspace: harden the branch predictor. */
	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}
NOKPROBE_SYMBOL(el0_pc);
|
/*
 * SP alignment fault from EL0. The misaligned value is the saved user
 * stack pointer (regs->sp), not FAR_EL1, hence no sysreg read here.
 */
static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}
NOKPROBE_SYMBOL(el0_sp);
|
/*
 * Undefined/unknown instruction from EL0: standard EL0 prologue, then
 * dispatch to do_undefinstr().
 */
static void notrace el0_undef(struct pt_regs *regs)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(el0_undef);
|
/*
 * Branch Target Identification (BTI) exception from EL0: standard EL0
 * prologue, then dispatch to do_bti().
 */
static void notrace el0_bti(struct pt_regs *regs)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}
NOKPROBE_SYMBOL(el0_bti);
|
/*
 * Fallback for an EL0 synchronous exception class with no dedicated
 * handler; reported via bad_el0_sync() (reason 0) with the raw ESR.
 */
static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}
NOKPROBE_SYMBOL(el0_inv);
|
/*
 * Debug exception from EL0. Interrupts stay masked across
 * do_debug_exception(); DAIF is restored to the no-IRQ process-context
 * state afterwards rather than before the handler runs.
 */
static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	/* Mirror the masked-IRQ state in PMR, as in el1_dbg(). */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	user_exit_irqoff();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
NOKPROBE_SYMBOL(el0_dbg);
|
/*
 * SVC (syscall) from 64-bit EL0. Only PMR is updated here; context
 * tracking and DAIF handling are left to do_el0_svc() —
 * NOTE(review): presumed, not visible in this file.
 */
static void notrace el0_svc(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	do_el0_svc(regs);
}
NOKPROBE_SYMBOL(el0_svc);
|
/*
 * ARMv8.3-FPAC pointer authentication failure from EL0: a user auth
 * instruction failed to authenticate its PAC. Dispatched to
 * do_ptrauth_fault() after the standard EL0 prologue.
 */
static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}
NOKPROBE_SYMBOL(el0_fpac);
|
/*
 * Top-level dispatcher for synchronous exceptions taken from 64-bit
 * EL0, entered from the assembly vectors. Decodes the exception class
 * from ESR_EL1 and routes to the matching el0_* helper; anything
 * unrecognised falls through to el0_inv().
 */
asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el0_sync_handler);
|
#ifdef CONFIG_COMPAT
|
|
/*
 * Trapped CP15 access from 32-bit (compat) EL0: standard EL0 prologue,
 * then dispatch to do_cp15instr(). Compiled only under CONFIG_COMPAT.
 */
static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	user_exit_irqoff();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}
NOKPROBE_SYMBOL(el0_cp15);
|
/*
 * SVC (syscall) from 32-bit (compat) EL0. Only PMR is updated here,
 * mirroring el0_svc(); the rest is left to do_el0_svc_compat().
 */
static void notrace el0_svc_compat(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	do_el0_svc_compat(regs);
}
NOKPROBE_SYMBOL(el0_svc_compat);
|
/*
 * Top-level dispatcher for synchronous exceptions taken from 32-bit
 * (compat) EL0. Same structure as el0_sync_handler(), but with the
 * AArch32 exception classes (SVC32, CP14/CP15 traps, BKPT32, FP_EXC32)
 * and no SVE/BTI/SP_ALIGN/FPAC cases.
 */
asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		/* CP14 accesses are treated as undefined for compat tasks. */
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
NOKPROBE_SYMBOL(el0_sync_compat_handler);
|
#endif /* CONFIG_COMPAT */
|