Merge branch 'for-next/errata' into for-next/core
Rework of the workaround for Cortex-A76 erratum 1463225 to fit in better
with the ongoing exception entry cleanups and changes to the detection code
for Cortex-A55 erratum 1024718 since it applies to all revisions of the
silicon.

* for-next/errata:
  arm64: entry: consolidate Cortex-A76 erratum 1463225 workaround
  arm64: Extend workaround for erratum 1024718 to all versions of Cortex-A55
commit 90eb8c9d94
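For context on the erratum 1024718 part of this merge: detection moves from a fixed r0p0-r1p0 MIDR window to MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), as seen in the Kconfig and cpu_has_broken_dbm() hunks below. The following is a minimal, standalone C sketch of how such a variant/revision window check works in principle; the struct, helper and PART_CORTEX_A55 constant here are illustrative stand-ins, not the kernel's cputype.h definitions.

/*
 * Standalone sketch (not kernel code): matching a CPU against an erratum
 * list by MIDR part number plus a [min, max] variant/revision window.
 * "All versions" is simply the widest possible window.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct midr_range {                     /* illustrative range entry */
        uint16_t partnum;               /* CPU part number */
        uint8_t  var_min, rev_min;      /* lowest affected variant/revision */
        uint8_t  var_max, rev_max;      /* highest affected variant/revision */
};

#define PART_CORTEX_A55 0xD05           /* MIDR part number for Cortex-A55 */

/* The old r0p0..r1p0 window versus an "all versions" (0x0..0xf) window. */
static const struct midr_range a55_r0p0_r1p0 = { PART_CORTEX_A55, 0, 0, 1,   0   };
static const struct midr_range a55_all       = { PART_CORTEX_A55, 0, 0, 0xf, 0xf };

static bool midr_in_range(uint16_t part, uint8_t var, uint8_t rev,
                          const struct midr_range *r)
{
        /* Pack variant:revision so the window becomes a simple integer range. */
        uint16_t rv     = (uint16_t)((var << 4) | rev);
        uint16_t rv_min = (uint16_t)((r->var_min << 4) | r->rev_min);
        uint16_t rv_max = (uint16_t)((r->var_max << 4) | r->rev_max);

        return part == r->partnum && rv >= rv_min && rv <= rv_max;
}

int main(void)
{
        /* A hypothetical A55 r2p0: outside the old window, inside the new one. */
        printf("r2p0 in r0p0-r1p0 window:    %d\n",
               midr_in_range(PART_CORTEX_A55, 2, 0, &a55_r0p0_r1p0));
        printf("r2p0 in all-versions window: %d\n",
               midr_in_range(PART_CORTEX_A55, 2, 0, &a55_all));
        return 0;
}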
@@ -522,7 +522,7 @@ config ARM64_ERRATUM_1024718
 	  help
 	  This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
 
-	  Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
+	  Affected Cortex-A55 cores (all revisions) could cause incorrect
 	  update of the hardware dirty bit when the DBM/AP bits are updated
 	  without a break-before-make. The workaround is to disable the usage
 	  of hardware DBM locally on the affected cores. CPUs not affected by
@@ -107,8 +107,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 }
 
 #ifdef CONFIG_ARM64_ERRATUM_1463225
-DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-
 static bool
 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
 			       int scope)
@@ -1506,7 +1506,7 @@ static bool cpu_has_broken_dbm(void)
 	/* List of CPUs which have broken DBM support. */
 	static const struct midr_range cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1024718
-		MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),	// A55 r0p0 -r1p0
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
 		/* Kryo4xx Silver (rdpe => r1p0) */
 		MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
@@ -109,6 +109,55 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 	exit_to_kernel_mode(regs);
 }
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+	u32 reg, val;
+
+	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+		return;
+
+	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+		return;
+
+	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+	reg = read_sysreg(mdscr_el1);
+	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+	write_sysreg(val, mdscr_el1);
+	asm volatile("msr daifclr, #8");
+	isb();
+
+	/* We will have taken a single-step exception by this point */
+
+	write_sysreg(reg, mdscr_el1);
+	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+
+static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
+		return false;
+
+	/*
+	 * We've taken a dummy step exception from the kernel to ensure
+	 * that interrupts are re-enabled on the syscall path. Return back
+	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
+	 * masked so that we can safely restore the mdscr and get on with
+	 * handling the syscall.
+	 */
+	regs->pstate |= PSR_D_BIT;
+	return true;
+}
+#else /* CONFIG_ARM64_ERRATUM_1463225 */
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+	return false;
+}
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);
@@ -186,7 +235,8 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
 	arm64_enter_el1_dbg(regs);
-	do_debug_exception(far, esr, regs);
+	if (!cortex_a76_erratum_1463225_debug_handler(regs))
+		do_debug_exception(far, esr, regs);
 	arm64_exit_el1_dbg(regs);
 }
 
@@ -362,6 +412,7 @@ static void noinstr el0_svc(struct pt_regs *regs)
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
 	enter_from_user_mode();
+	cortex_a76_erratum_1463225_svc_handler();
 	do_el0_svc(regs);
 }
 
@@ -439,6 +490,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
 	enter_from_user_mode();
+	cortex_a76_erratum_1463225_svc_handler();
 	do_el0_svc_compat(regs);
 }
 
@@ -65,35 +65,6 @@ static inline bool has_syscall_work(unsigned long flags)
 int syscall_trace_enter(struct pt_regs *regs);
 void syscall_trace_exit(struct pt_regs *regs);
 
-#ifdef CONFIG_ARM64_ERRATUM_1463225
-DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-
-static void cortex_a76_erratum_1463225_svc_handler(void)
-{
-	u32 reg, val;
-
-	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
-		return;
-
-	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
-		return;
-
-	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
-	reg = read_sysreg(mdscr_el1);
-	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
-	write_sysreg(val, mdscr_el1);
-	asm volatile("msr daifclr, #8");
-	isb();
-
-	/* We will have taken a single-step exception by this point */
-
-	write_sysreg(reg, mdscr_el1);
-	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
-}
-#else
-static void cortex_a76_erratum_1463225_svc_handler(void) { }
-#endif /* CONFIG_ARM64_ERRATUM_1463225 */
-
 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 			   const syscall_fn_t syscall_table[])
 {
@@ -120,7 +91,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	 * (Similarly for HVC and SMC elsewhere.)
 	 */
 
-	cortex_a76_erratum_1463225_svc_handler();
 	local_daif_restore(DAIF_PROCCTX);
 
 	if (flags & _TIF_MTE_ASYNC_FAULT) {
@@ -874,44 +874,12 @@ static void debug_exception_exit(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(debug_exception_exit);
 
-#ifdef CONFIG_ARM64_ERRATUM_1463225
-DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
-
-static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
-{
-	if (user_mode(regs))
-		return 0;
-
-	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
-		return 0;
-
-	/*
-	 * We've taken a dummy step exception from the kernel to ensure
-	 * that interrupts are re-enabled on the syscall path. Return back
-	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
-	 * masked so that we can safely restore the mdscr and get on with
-	 * handling the syscall.
-	 */
-	regs->pstate |= PSR_D_BIT;
-	return 1;
-}
-#else
-static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
-{
-	return 0;
-}
-#endif /* CONFIG_ARM64_ERRATUM_1463225 */
-NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler);
-
 void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
 			struct pt_regs *regs)
 {
 	const struct fault_info *inf = esr_to_debug_fault_info(esr);
 	unsigned long pc = instruction_pointer(regs);
 
-	if (cortex_a76_erratum_1463225_debug_handler(regs))
-		return;
-
 	debug_exception_enter(regs);
 
 	if (user_mode(regs) && !is_ttbr0_addr(pc))