linux/arch/parisc/include/asm/spinlock.h
Arnd Bergmann f98a3dccfc locking: Remove spin_lock_flags() etc
parisc, ia64 and powerpc32 are the only remaining architectures that
provide custom arch_{spin,read,write}_lock_flags() functions, which are
meant to re-enable interrupts while waiting for a spinlock.

However, none of these can actually run into this codepath, because
it is only called on architectures without CONFIG_GENERIC_LOCKBREAK,
or when CONFIG_DEBUG_LOCK_ALLOC is set without CONFIG_LOCKDEP, and none
of those combinations are possible on the three architectures.

Going back in the git history, it appears that arch/mn10300 may have
been able to run into this code path, but there is a good chance that
it never worked. On the architectures that still exist, it was
already impossible to hit back in 2008 after the introduction of
CONFIG_GENERIC_LOCKBREAK, and possibly earlier.

As this is all dead code, just remove it and the helper functions built
around it. For arch/ia64, the inline asm could be cleaned up, but
it seems safer to leave it untouched.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Helge Deller <deller@gmx.de>  # parisc
Link: https://lore.kernel.org/r/20211022120058.1031690-1-arnd@kernel.org
2021-10-30 16:37:28 +02:00
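
For context, the removed arch_spin_lock_flags() helper paired the usual lock-word
spin with an interrupt toggle: while busy-waiting it re-enabled interrupts if the
caller's saved flags said they had been on, and masked them again before retrying
the atomic claim. A rough sketch of that pattern, reconstructed from the commit
description rather than the exact removed code (the PSW_SM_I test, i.e. the PSW
interrupt-enable bit from <asm/psw.h>, is recalled/assumed):

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				/* Caller had interrupts on: let them in while
				 * we wait, mask again before retrying ldcw. */
				local_irq_enable();
				while (*a == 0)
					cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
}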

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
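
/*
 * The lock word is claimed with ldcw (load and clear word), which
 * atomically returns the old value and stores zero: non-zero means
 * "was free, now ours", zero means "already held".  The word must be
 * 16-byte aligned on older PA-RISC CPUs, which __ldcw_align() handles.
 */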

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	return READ_ONCE(*a) == 0;
}

static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/*
	 * ldcw is the atomic claim; if it fails, spin on plain loads of
	 * the lock word and only retry the atomic once it reads non-zero.
	 */
	while (__ldcw(a) == 0)
		while (*a == 0)
			continue;
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}
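
/* Returns non-zero if the lock was acquired, zero if it was already held. */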
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	return __ldcw(a) != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
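
/*
 * counter encoding (as used by the helpers below):
 *   counter == __ARCH_RW_LOCK_UNLOCKED__      : lock is free
 *   counter == __ARCH_RW_LOCK_UNLOCKED__ - N  : held by N readers
 *   counter == 0                              : held by a writer
 */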

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer
	 * Hence the claim that Linux rwlocks are unfair to writers.
	 * (can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */
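
For reference, the arch_* hooks above are not called directly by drivers; they back
the generic <linux/spinlock.h> / rwlock API. A minimal, hypothetical usage sketch
(driver-style code, not part of this header; the lock and function names are made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);	/* served by arch_spin_lock()/unlock() */
static DEFINE_RWLOCK(stats_lock);	/* served by the arch_rwlock helpers */

static void bump_stat(unsigned long *stat)
{
	unsigned long flags;

	/* Save and disable local interrupts, then take the spinlock. */
	spin_lock_irqsave(&dev_lock, flags);
	(*stat)++;
	spin_unlock_irqrestore(&dev_lock, flags);

	/* Multiple readers may hold stats_lock at once; a writer waits
	 * until every reader has dropped it (see arch_write_trylock()). */
	write_lock(&stats_lock);
	(*stat)++;
	write_unlock(&stats_lock);
}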