2019-06-03 13:44:50 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2012-03-05 19:49:29 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_IRQFLAGS_H
|
|
|
|
#define __ASM_IRQFLAGS_H
|
|
|
|
|
2019-01-31 22:58:50 +08:00
|
|
|
#include <asm/alternative.h>
|
2019-10-02 17:06:12 +08:00
|
|
|
#include <asm/barrier.h>
|
2012-03-05 19:49:29 +08:00
|
|
|
#include <asm/ptrace.h>
|
2019-01-31 22:58:50 +08:00
|
|
|
#include <asm/sysreg.h>
|
2012-03-05 19:49:29 +08:00
|
|
|
|
2017-11-02 20:12:35 +08:00
|
|
|
/*
|
|
|
|
 * AArch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
|
|
|
|
* FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
|
|
|
|
* order:
|
|
|
|
 * Masking debug exceptions causes all other exceptions to be masked too.
|
|
|
|
* Masking SError masks irq, but not debug exceptions. Masking irqs has no
|
|
|
|
* side effects for other flags. Keeping to this order makes it easier for
|
|
|
|
* entry.S to know which exceptions should be unmasked.
|
|
|
|
*
|
|
|
|
* FIQ is never expected, but we mask it when we disable debug exceptions, and
|
|
|
|
* unmask it at all other times.
|
|
|
|
*/
|
|
|
|
|
2012-03-05 19:49:29 +08:00
|
|
|
/*
|
|
|
|
* CPU interrupt mask handling.
|
|
|
|
*/
|
|
|
|
/*
 * Unmask IRQs on the local CPU.
 *
 * Without the ARM64_HAS_IRQ_PRIO_MASKING alternative this clears DAIF.I
 * ("msr daifclr, #2").  With it, interrupts are instead unmasked by writing
 * GIC_PRIO_IRQON to ICC_PMR_EL1, the GIC priority mask register.
 */
static inline void arch_local_irq_enable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		/*
		 * With PMR-based masking debug enabled, PMR is expected to
		 * only ever hold one of the two canonical values.
		 */
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr daifclr, #2 // arch_local_irq_enable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");

	/*
	 * Synchronize the PMR write (defined in <asm/barrier.h>); presumably
	 * a no-op when priority masking is not in use — see pmr_sync().
	 */
	pmr_sync();
}
|
|
|
|
|
|
|
|
/*
 * Mask IRQs on the local CPU.
 *
 * Without the ARM64_HAS_IRQ_PRIO_MASKING alternative this sets DAIF.I
 * ("msr daifset, #2").  With it, interrupts are instead masked by writing
 * GIC_PRIO_IRQOFF to ICC_PMR_EL1.  Note that, unlike the enable path,
 * no pmr_sync() follows the write here.
 */
static inline void arch_local_irq_disable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		/*
		 * With PMR-based masking debug enabled, PMR is expected to
		 * only ever hold one of the two canonical values.
		 */
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	asm volatile(ALTERNATIVE(
		"msr daifset, #2 // arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
}
|
|
|
|
|
|
|
|
/*
 * Save the current interrupt enable state.
 *
 * Returns the raw DAIF value, or — when the ARM64_HAS_IRQ_PRIO_MASKING
 * alternative is applied — the current ICC_PMR_EL1 value.  Either form is
 * opaque to callers and only meaningful to arch_irqs_disabled_flags() /
 * arch_local_irq_restore().
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs %0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}
|
|
|
|
|
2019-06-11 17:38:10 +08:00
|
|
|
/*
 * Test whether a saved flags value (from arch_local_save_flags()) has
 * interrupts masked.  Returns non-zero when masked, zero when unmasked.
 *
 * DAIF path: isolates PSR_I_BIT from the flags word.
 * PMR path:  XORs against GIC_PRIO_IRQON, so the result is non-zero for
 *            any PMR value other than the fully-unmasked one.
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	asm volatile(ALTERNATIVE(
		"and %w0, %w1, #" __stringify(PSR_I_BIT),
		"eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}
|
|
|
|
|
2020-08-21 16:40:49 +08:00
|
|
|
/*
 * Report whether interrupts are currently masked on this CPU.
 * Non-zero means masked.
 */
static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
|
|
|
|
|
2019-01-31 22:58:50 +08:00
|
|
|
/*
 * Snapshot the current interrupt state and mask IRQs, returning the
 * snapshot for a later arch_local_irq_restore().
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long saved = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled, just keep the
	 * current state if interrupts are already disabled/masked: only
	 * transition when they are currently enabled.
	 */
	if (!arch_irqs_disabled_flags(saved))
		arch_local_irq_disable();

	return saved;
}
|
|
|
|
|
|
|
|
/*
 * Restore a saved IRQ state previously obtained from
 * arch_local_save_flags() or arch_local_irq_save().
 *
 * DAIF path: writes the flags word back to the DAIF register.
 * PMR path:  writes the flags word back to ICC_PMR_EL1.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
		"msr daif, %0",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");

	/*
	 * Synchronize the PMR write in case the restore unmasked
	 * interrupts — see pmr_sync() in <asm/barrier.h>.
	 */
	pmr_sync();
}
|
|
|
|
|
2019-07-09 00:36:40 +08:00
|
|
|
#endif /* __ASM_IRQFLAGS_H */
|