mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-25 13:43:55 +08:00

commit 00b4c90787
ARMv6K CPUs have SEV (send event) and WFE (wait for event) instructions
which allow the CPU clock to be suspended until another CPU issues a SEV,
rather than spinning on the lock wasting power.  Make use of these
instructions.

Note that WFE does not wait if an event has been sent since the last WFE
cleared the event status, so although it may look racy, the instruction
implementation ensures that these are dealt with.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
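For readers unfamiliar with these instructions, the handshake the commit relies on looks roughly like the sketch below. This is illustrative only, not kernel code: the example_* names are invented here, and an ARMv6K-capable assembler is assumed. Because SEV sets a per-CPU event register that the next WFE consumes, an event sent between the flag test and the WFE is not lost, which is why the wait loop is safe despite appearances.

/* Illustrative sketch only, not part of the kernel sources. */
static inline void example_wait_until_cleared(volatile unsigned int *flag)
{
	while (*flag != 0)
		/* Park the CPU until some CPU sends an event, then re-check. */
		__asm__ __volatile__("wfe" : : : "memory");
}

static inline void example_clear_and_signal(volatile unsigned int *flag)
{
	*flag = 0;
	__asm__ __volatile__(
		"mcr	p15, 0, %0, c7, c10, 4\n"	/* DSB: make the store visible */
		"	sev"				/* wake any CPUs waiting in WFE */
		: : "r" (0) : "memory");
}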
202 lines
3.7 KiB
C
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value. If it is zero, we may have
 * won the lock, so we try exclusively storing it. A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev"
#endif
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31. When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev\n"
#endif
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this
 *    location. If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy. We may have multiple read locks
 * currently active. However, we know we won't have any write
 * locks.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
#ifdef CONFIG_CPU_32v6K
"\n	cmp	%0, #0\n"
"	mcreq	p15, 0, %0, c7, c10, 4\n"
"	seveq"
#endif
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */
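As an aid to reading the conditionally executed assembly above (strexpl, rsbpls, mcreq/seveq), here is a rough portable-C model of the same read/write-lock protocol. This is an illustrative sketch only: the model_* names are invented for this note, and GCC __sync builtins stand in for the ldrex/strex loops and smp_mb(); the authoritative implementation is the inline assembly in the header.

/* Illustrative model of the rwlock protocol above, not kernel code. */
typedef struct { volatile unsigned int lock; } model_rwlock_t;	/* 0 = unlocked */

static void model_write_lock(model_rwlock_t *rw)
{
	/* A writer takes the whole word by swinging it from 0 to bit 31. */
	while (!__sync_bool_compare_and_swap(&rw->lock, 0, 0x80000000u))
		;	/* the real code parks here with WFE on ARMv6K */
}

static void model_write_unlock(model_rwlock_t *rw)
{
	__sync_synchronize();	/* smp_mb() before release */
	rw->lock = 0;		/* sole owner, so a plain store suffices */
}

static void model_read_lock(model_rwlock_t *rw)
{
	unsigned int old;

	/* Readers bump the count, but only while the writer bit is clear. */
	do {
		old = rw->lock;
	} while ((old & 0x80000000u) ||
		 !__sync_bool_compare_and_swap(&rw->lock, old, old + 1));
}

static void model_read_unlock(model_rwlock_t *rw)
{
	__sync_synchronize();			/* smp_mb() before release */
	__sync_fetch_and_sub(&rw->lock, 1);	/* last reader leaves 0 behind */
}

The invariant is the one the header's comments describe: bit 31 means write-locked, a positive value counts active readers, and zero means unlocked. The real read-unlock path additionally issues DSB + SEV when the count reaches zero on ARMv6K so that waiters parked in WFE wake up.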