9eae588529
Just like we have helpers for WFI and WFE, add the WFxT versions. Note that the encoding is the one reported by objdump, as no current toolchain knows about these instructions yet.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419182755.601427-10-maz@kernel.org
217 lines
5.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

#define __nops(n)	".rept " #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))
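
/*
 * For illustration: __nops(3) stringizes its argument, expanding to
 * ".rept 3\nnop\n.endr\n", so nops(3) emits exactly three NOP
 * instructions via the assembler's .rept directive.
 */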

#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfet(val)	asm volatile("msr s0_3_c1_c0_0, %0"	\
				     : : "r" (val) : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")
#define wfit(val)	asm volatile("msr s0_3_c1_c0_1, %0"	\
				     : : "r" (val) : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")
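
/*
 * The "hint #N" spellings above are the architectural encodings of
 * PSB CSYNC (#17), TSB CSYNC (#18) and CSDB (#20), written out
 * numerically so that toolchains without the mnemonics still
 * assemble them.
 */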

/*
 * Data Gathering Hint:
 * This instruction prevents merging memory accesses with Normal-NC or
 * Device-GRE attributes before the hint instruction with any memory accesses
 * appearing after the hint instruction.
 */
#define dgh()		asm volatile("hint #6" : : : "memory")
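
/*
 * dgh() is what backs io_stop_wc() below: issued after a burst of
 * Normal-NC or Device-GRE writes, it stops them from being gathered
 * with later writes, bounding the latency of the earlier accesses.
 */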

#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
do {								\
	extern struct static_key_false gic_pmr_sync;		\
								\
	if (static_branch_unlikely(&gic_pmr_sync))		\
		dsb(sy);					\
} while (0)
#else
#define pmr_sync()	do {} while (0)
#endif
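
/*
 * pmr_sync() is intended to follow writes to ICC_PMR_EL1 when
 * pseudo-NMIs are in use: on systems where the gic_pmr_sync static
 * key is set, a dsb(sy) is needed before the new priority mask is
 * guaranteed to be observed.
 */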

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_mb()	dmb(osh)
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)
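
/*
 * A typical use of the dma_*() flavours (a sketch, with hypothetical
 * descriptor fields): fill in a DMA descriptor's payload, dma_wmb(),
 * then set the descriptor's ownership flag, so the device can never
 * observe the flag before the payload it guards.
 */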

#define io_stop_wc()	dgh()

#define tsb_csync()								\
	do {									\
		/*								\
		 * CPUs affected by Arm Erratum 2054223 or 2067961 need		\
		 * another TSB to ensure the trace is flushed. The barriers	\
		 * don't have to be strictly back to back, as long as the	\
		 * CPU is in a trace-prohibited state.				\
		 */								\
		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))	\
			__tsb_csync();						\
		__tsb_csync();							\
	} while (0)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}
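
/*
 * How the asm works: "cmp idx, sz" clears the carry flag only when
 * idx < sz (an unsigned borrow), and "sbc mask, xzr, xzr" then yields
 * ~0UL on borrow and 0 otherwise. A usage sketch, with hypothetical
 * 'arr' and a bounds-checked 'idx':
 *
 *	if (idx < sz)
 *		val = arr[idx & array_index_mask_nospec(idx, sz)];
 *
 * so a mispredicted bounds check cannot speculatively read arr[idx]
 * out of bounds.
 */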

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
 */
#define arch_counter_enforce_ordering(val) do {			\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)
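
/*
 * The trick above: "eor tmp, val, val" always yields zero but carries
 * a register dependency on the counter value; adding that zero to sp
 * and loading (into xzr, so the result is discarded) turns the counter
 * read into something subsequent memory barriers will order against.
 */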

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)
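
/*
 * Typical producer-side pairing (a sketch; 'data' and 'ready' are
 * hypothetical variables, not part of this header):
 *
 *	WRITE_ONCE(data, 42);
 *	smp_store_release(&ready, 1);
 *
 * The STLR guarantees that any CPU observing ready == 1 through a
 * matching acquire load also observes the write to data.
 */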

#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})
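
/*
 * The matching consumer side (same hypothetical variables):
 *
 *	if (smp_load_acquire(&ready))
 *		do_something(data);	// guaranteed to observe data == 42
 *
 * The LDAR orders the flag load before every memory access that
 * follows it in program order.
 */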

#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})
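
/*
 * Unlike the generic busy-wait fallback, these variants call
 * __cmpwait_relaxed() (an LDXR + WFE sequence from <asm/cmpxchg.h>),
 * so the CPU can sleep in WFE until the watched cacheline changes. A
 * usage sketch, with a hypothetical 'flag':
 *
 *	smp_cond_load_acquire(&flag, VAL != 0);
 *
 * where VAL names the most recently loaded value inside cond_expr.
 */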

#include <asm-generic/barrier.h>

#endif /* __ASSEMBLY__ */

#endif /* __ASM_BARRIER_H */