commit 8bf0a8048b

When Priority Mask Hint Enable (PMHE) == 0b1, the GIC may use the PMR
value to determine whether to signal an IRQ to a PE, and consequently
after a change to the PMR value, a DSB SY may be required to ensure that
interrupts are signalled to a CPU in finite time. When PMHE == 0b0,
interrupts are always signalled to the relevant PE, and all masking
occurs locally, without requiring a DSB SY.

Since commit:

  f226650494 ("arm64: Relax ICC_PMR_EL1 accesses when ICC_CTLR_EL1.PMHE is clear")

... we handle this dynamically: in most cases a static key is used to
determine whether to issue a DSB SY, but the entry code must read from
ICC_CTLR_EL1 as static keys aren't accessible from plain assembly.

It would be much nicer to use an alternative instruction sequence for
the DSB, as this would avoid the need to read from ICC_CTLR_EL1 in the
entry code, and for most other code this will result in simpler code
generation with fewer instructions and fewer branches.

This patch adds a new ARM64_HAS_GIC_PRIO_RELAXED_SYNC cpucap which is
only set when ICC_CTLR_EL1.PMHE == 0b0 (and GIC priority masking is in
use). This allows us to replace the existing users of the `gic_pmr_sync`
static key with alternative sequences which default to a DSB SY and are
relaxed to a NOP when PMHE is not in use.

The entry assembly management of the PMR is slightly restructured to use
a branch (rather than multiple NOPs) when priority masking is not in use.
This is more in keeping with other alternatives in the entry assembly,
and permits the use of a separate alternative for the PMHE-dependent
DSB SY (and removal of the conditional branch this currently requires).
For consistency I've adjusted both the save and restore paths.

According to bloat-o-meter, when building defconfig +
CONFIG_ARM64_PSEUDO_NMI=y this shrinks the kernel text by ~4KiB:

| add/remove: 4/2 grow/shrink: 42/310 up/down: 332/-5032 (-4700)

The resulting vmlinux is ~66KiB smaller, though the resulting Image size
is unchanged due to padding and alignment:

| [mark@lakrids:~/src/linux]% ls -al vmlinux-*
| -rwxr-xr-x 1 mark mark 137508344 Jan 17 14:11 vmlinux-after
| -rwxr-xr-x 1 mark mark 137575440 Jan 17 13:49 vmlinux-before
| [mark@lakrids:~/src/linux]% ls -al Image-*
| -rw-r--r-- 1 mark mark 38777344 Jan 17 14:11 Image-after
| -rw-r--r-- 1 mark mark 38777344 Jan 17 13:49 Image-before

Prior to this patch we did not verify the state of ICC_CTLR_EL1.PMHE on
secondary CPUs. As of this patch this is verified by the cpufeature code
when using GIC priority masking (i.e. when using pseudo-NMIs).

Note that since commit:

  7e3a57fa6c ("arm64: Document ICC_CTLR_EL3.PMHE setting requirements")

... Documentation/arm64/booting.rst specifies:

|   - ICC_CTLR_EL3.PMHE (bit 6) must be set to the same value across
|     all CPUs the kernel is executing on, and must stay constant
|     for the lifetime of the kernel.

... so that should not adversely affect any compliant systems, and as
we'll only check for the absence of PMHE when using pseudo-NMIs, this
will only fire when such a mismatch would adversely affect the system.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230130145429.903791-5-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/arch_gicv3.h
 *
 * Copyright (C) 2015 ARM Ltd.
 */
#ifndef __ASM_ARCH_GICV3_H
#define __ASM_ARCH_GICV3_H

#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

#include <linux/irqchip/arm-gic-common.h>
#include <linux/stringify.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>

#define read_gicreg(r)			read_sysreg_s(SYS_ ## r)
#define write_gicreg(v, r)		write_sysreg_s(v, SYS_ ## r)

/*
 * Low-level accessors
 *
 * These system registers are 32 bits, but we make sure that the compiler
 * sets the GP register's most significant bits to 0 with an explicit cast.
 */

static __always_inline void gic_write_dir(u32 irq)
{
	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
	isb();
}

static inline u64 gic_read_iar_common(void)
{
	u64 irqstat;

	irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
	dsb(sy);
	return irqstat;
}

/*
 * Cavium ThunderX erratum 23154
 *
 * The gicv3 of ThunderX requires a modified version for reading the
 * IAR status to ensure data synchronization (access to icc_iar1_el1
 * is not sync'ed before and after).
 *
 * Erratum 38545
 *
 * When an IAR register read races with a GIC interrupt RELEASE event,
 * the GIC-CPU interface could wrongly return a valid INTID to the CPU
 * for an interrupt that is already released (not activated) instead of 0x3ff.
 *
 * To work around this, return a valid interrupt ID only if there is a change
 * in the active priority list after the IAR read.
 *
 * Common function used for both the workarounds since,
 * 1. On ThunderX 88xx 1.x both errata are applicable.
 * 2. Having extra nops doesn't add any side effects for silicons where
 *    erratum 23154 is not applicable.
 */
static inline u64 gic_read_iar_cavium_thunderx(void)
{
	u64 irqstat, apr;

	apr = read_sysreg_s(SYS_ICC_AP1R0_EL1);
	nops(8);
	irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
	nops(4);
	mb();

	/* Max priority groups implemented is only 32 */
	if (likely(apr != read_sysreg_s(SYS_ICC_AP1R0_EL1)))
		return irqstat;

	return 0x3ff;
}

static inline void gic_write_ctlr(u32 val)
{
	write_sysreg_s(val, SYS_ICC_CTLR_EL1);
	isb();
}

static inline u32 gic_read_ctlr(void)
{
	return read_sysreg_s(SYS_ICC_CTLR_EL1);
}

static inline void gic_write_grpen1(u32 val)
{
	write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1);
	isb();
}

static inline void gic_write_sgi1r(u64 val)
{
	write_sysreg_s(val, SYS_ICC_SGI1R_EL1);
}

static inline u32 gic_read_sre(void)
{
	return read_sysreg_s(SYS_ICC_SRE_EL1);
}

static inline void gic_write_sre(u32 val)
{
	write_sysreg_s(val, SYS_ICC_SRE_EL1);
	isb();
}

static inline void gic_write_bpr1(u32 val)
{
	write_sysreg_s(val, SYS_ICC_BPR1_EL1);
}

static inline u32 gic_read_pmr(void)
{
	return read_sysreg_s(SYS_ICC_PMR_EL1);
}

static __always_inline void gic_write_pmr(u32 val)
{
	write_sysreg_s(val, SYS_ICC_PMR_EL1);
}

static inline u32 gic_read_rpr(void)
{
	return read_sysreg_s(SYS_ICC_RPR_EL1);
}

#define gic_read_typer(c)		readq_relaxed(c)
#define gic_write_irouter(v, c)		writeq_relaxed(v, c)
#define gic_read_lpir(c)		readq_relaxed(c)
#define gic_write_lpir(v, c)		writeq_relaxed(v, c)

#define gic_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

#define gits_read_baser(c)		readq_relaxed(c)
#define gits_write_baser(v, c)		writeq_relaxed(v, c)

#define gits_read_cbaser(c)		readq_relaxed(c)
#define gits_write_cbaser(v, c)		writeq_relaxed(v, c)

#define gits_write_cwriter(v, c)	writeq_relaxed(v, c)

#define gicr_read_propbaser(c)		readq_relaxed(c)
#define gicr_write_propbaser(v, c)	writeq_relaxed(v, c)

#define gicr_write_pendbaser(v, c)	writeq_relaxed(v, c)
#define gicr_read_pendbaser(c)		readq_relaxed(c)

#define gicr_write_vpropbaser(v, c)	writeq_relaxed(v, c)
#define gicr_read_vpropbaser(c)		readq_relaxed(c)

#define gicr_write_vpendbaser(v, c)	writeq_relaxed(v, c)
#define gicr_read_vpendbaser(c)		readq_relaxed(c)

static inline bool gic_prio_masking_enabled(void)
{
	return system_uses_irq_prio_masking();
}

static inline void gic_pmr_mask_irqs(void)
{
	BUILD_BUG_ON(GICD_INT_DEF_PRI < (__GIC_PRIO_IRQOFF |
					 GIC_PRIO_PSR_I_SET));
	BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
	/*
	 * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
	 * and non-secure PMR accesses are not subject to the shifts that
	 * are applied to IRQ priorities
	 */
	BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
	/*
	 * Same situation as above, but now we make sure that we can mask
	 * regular interrupts.
	 */
	BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) < (__GIC_PRIO_IRQOFF_NS |
							 GIC_PRIO_PSR_I_SET));
	gic_write_pmr(GIC_PRIO_IRQOFF);
}

static inline void gic_arch_enable_irqs(void)
{
	asm volatile ("msr daifclr, #3" : : : "memory");
}

static inline bool gic_has_relaxed_pmr_sync(void)
{
	return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC);
}

#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
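[Editor's note] As a usage illustration (not part of the header above), code
that needs to know whether a DSB SY must follow a write to ICC_PMR_EL1 could
consult the new gic_has_relaxed_pmr_sync() helper as sketched below;
report_pmr_sync_mode() is a hypothetical name used only for this example.

#include <linux/printk.h>

/* Hypothetical caller, for illustration only; not part of this header. */
static void report_pmr_sync_mode(void)
{
	if (gic_has_relaxed_pmr_sync()) {
		/* PMHE == 0b0: masking is purely local, no DSB SY needed */
		pr_info("GICv3: relaxed ICC_PMR_EL1 synchronisation\n");
	} else {
		/* PMHE == 0b1: the GIC may honour PMR, so a DSB SY is required */
		pr_info("GICv3: forced ICC_PMR_EL1 synchronisation\n");
	}
}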