commit dda5f312bb
The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.
Fix this by defining sync ops with the required barriers.
Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.
Fixes: e54d2f6152 ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-2-mark.rutland@arm.com
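
For context, the sketch below (plain C, with hypothetical demo_* names rather than the kernel's real barrier macros, and assuming an ARMv7 "dmb ish") illustrates the problem the patch describes: an SMP-conditional barrier reduces to a compiler-only barrier when CONFIG_SMP is off, so it provides no hardware ordering, whereas a sync-style barrier must always emit a real dmb because the peer on the other side of the shared memory (e.g. a backend running on another physical CPU under the hypervisor) still exists.

/* Illustrative sketch only: demo_* names are hypothetical, not the kernel's
 * real barrier macros. Assumes ARMv7 ("dmb ish"). */
#ifdef CONFIG_SMP
#define demo_smp_mb()   __asm__ __volatile__("dmb ish" ::: "memory")
#else
/* UP build: the barrier degrades to a compiler-only barrier, so the CPU may
 * still reorder the accesses as observed by another agent. */
#define demo_smp_mb()   __asm__ __volatile__("" ::: "memory")
#endif

/* A sync-style barrier must always be a real dmb, even on a UP build,
 * because the other side of the shared memory (e.g. a Xen backend on
 * another physical CPU) is still there. */
#define demo_sync_mb()  __asm__ __volatile__("dmb ish" ::: "memory")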
116 lines | 2.3 KiB | C
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/assembler.h>
#include <asm/unwind.h>

#if __LINUX_ARM_ARCH__ >= 6
        .macro  bitop, name, instr
ENTRY(\name)
UNWIND(.fnstart)
        ands    ip, r1, #3
        strbne  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
        add     r1, r1, r0, lsl #2      @ Get word offset
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
        .arch_extension mp
        ALT_SMP(W(pldw) [r1])
        ALT_UP(W(nop))
#endif
        mov     r3, r2, lsl r3
1:      ldrex   r2, [r1]
        \instr  r2, r2, r3
        strex   r0, r2, [r1]
        cmp     r0, #0
        bne     1b
        bx      lr
UNWIND(.fnend)
ENDPROC(\name)
        .endm

        .macro  __testop, name, instr, store, barrier
ENTRY(\name)
UNWIND(.fnstart)
        ands    ip, r1, #3
        strbne  r1, [ip]                @ assert word-aligned
        mov     r2, #1
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
        add     r1, r1, r0, lsl #2      @ Get word offset
        mov     r3, r2, lsl r3          @ create mask
        \barrier
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
        .arch_extension mp
        ALT_SMP(W(pldw) [r1])
        ALT_UP(W(nop))
#endif
1:      ldrex   r2, [r1]
        ands    r0, r2, r3              @ save old value of bit
        \instr  r2, r2, r3              @ toggle bit
        strex   ip, r2, [r1]
        cmp     ip, #0
        bne     1b
        \barrier
        cmp     r0, #0
        movne   r0, #1
2:      bx      lr
UNWIND(.fnend)
ENDPROC(\name)
        .endm

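@ testop builds the test_and_*_bit routines with smp_dmb, which UP kernels
@ elide; sync_testop uses __smp_dmb, which always emits a full barrier, so
@ ordering is preserved when a UP kernel runs under a hypervisor on an SMP
@ system.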
        .macro  testop, name, instr, store
        __testop \name, \instr, \store, smp_dmb
        .endm

        .macro  sync_testop, name, instr, store
        __testop \name, \instr, \store, __smp_dmb
        .endm
#else
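@ Pre-ARMv6 fallback: there is no ldrex/strex, so the ops serialise by
@ disabling IRQs around a plain load/modify/store; no sync_* variants are
@ provided here, as those require ARMv6 or later.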
        .macro  bitop, name, instr
ENTRY(\name)
UNWIND(.fnstart)
        ands    ip, r1, #3
        strbne  r1, [ip]                @ assert word-aligned
        and     r2, r0, #31
        mov     r0, r0, lsr #5
        mov     r3, #1
        mov     r3, r3, lsl r2
        save_and_disable_irqs ip
        ldr     r2, [r1, r0, lsl #2]
        \instr  r2, r2, r3
        str     r2, [r1, r0, lsl #2]
        restore_irqs ip
        ret     lr
UNWIND(.fnend)
ENDPROC(\name)
        .endm

/**
 * testop - implement a test_and_xxx_bit operation.
 * @instr: operational instruction
 * @store: store instruction
 *
 * Note: we can trivially conditionalise the store instruction
 * to avoid dirtying the data cache.
 */
        .macro  testop, name, instr, store
ENTRY(\name)
UNWIND(.fnstart)
        ands    ip, r1, #3
        strbne  r1, [ip]                @ assert word-aligned
        and     r3, r0, #31
        mov     r0, r0, lsr #5
        save_and_disable_irqs ip
        ldr     r2, [r1, r0, lsl #2]!
        mov     r0, #1
        tst     r2, r0, lsl r3
        \instr  r2, r2, r0, lsl r3
        \store  r2, [r1]
        moveq   r0, #0
        restore_irqs ip
        ret     lr
UNWIND(.fnend)
ENDPROC(\name)
        .endm
#endif
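
The macros above only generate the routine bodies; the entry-point names come from the companion .S files in arch/arm/lib/ that instantiate bitop, testop, and sync_testop with a concrete name and instruction. As a rough sketch of how C code then reaches these routines, with declarations modelled on the ARM bitops interface but treated here as assumptions rather than the exact upstream headers:

/* Sketch only: declarations modelled on the ARM bitops interface; treat the
 * exact names and headers as assumptions, not verbatim upstream code. */
extern int _test_and_set_bit(int nr, volatile unsigned long *p);       /* built with testop      */
extern int _sync_test_and_set_bit(int nr, volatile unsigned long *p);  /* built with sync_testop */

/* A sync_*() wrapper must resolve to the sync_testop-built entry point so
 * that the unconditional __smp_dmb barriers survive on UP kernels. */
#define demo_sync_test_and_set_bit(nr, p)  _sync_test_and_set_bit(nr, p)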