
powerpc/spinlocks: Rename SPLPAR-only spinlocks

The __rw_yield and __spin_yield functions only pertain to SPLPAR mode.
Rename them to make this relationship obvious.

Signed-off-by: Christopher M. Riedl <cmr@informatik.wtf>
Reviewed-by: Andrew Donnellan <ajd@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190813031314.1828-3-cmr@informatik.wtf
Christopher M. Riedl 2019-08-12 22:13:13 -05:00 committed by Michael Ellerman
parent d57b78353a
commit 31391ff7ea
2 changed files with 7 additions and 5 deletions

arch/powerpc/include/asm/spinlock.h

@@ -101,8 +101,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
+void splpar_spin_yield(arch_spinlock_t *lock);
+void splpar_rw_yield(arch_rwlock_t *lock);
+#define __spin_yield(x) splpar_spin_yield(x)
+#define __rw_yield(x) splpar_rw_yield(x)
 #else /* SPLPAR */
 #define __spin_yield(x)	barrier()
 #define __rw_yield(x)	barrier()
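
Note for readers of the hunk above: call sites keep using the __spin_yield()/__rw_yield() names. On CONFIG_PPC_SPLPAR builds the macros now forward to the renamed splpar_* helpers, while non-SPLPAR builds still reduce them to barrier(). The stand-alone C sketch below only illustrates that pattern; the sched_yield() stand-in, spin_until_free() and main() are hypothetical and are not kernel code.

#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

#ifdef CONFIG_PPC_SPLPAR
/* Shared-processor build: the legacy name forwards to the splpar_* helper.
 * In the kernel that helper confers the vCPU's time slice to the lock
 * holder; here a plain sched_yield() stands in for that. */
static void splpar_spin_yield(atomic_int *lock)
{
	(void)lock;
	sched_yield();
}
#define __spin_yield(x)	splpar_spin_yield(x)
#else
/* Dedicated-processor build: yielding buys nothing, so it compiles away. */
#define __spin_yield(x)	barrier()
#endif

/* The call site is identical under either configuration. */
static void spin_until_free(atomic_int *lock)
{
	while (atomic_load_explicit(lock, memory_order_acquire) != 0)
		__spin_yield(lock);
}

int main(void)
{
	atomic_int lock = 0;	/* already free, so this returns immediately */

	spin_until_free(&lock);
	puts("lock observed free");
	return 0;
}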

arch/powerpc/lib/locks.c

@@ -18,7 +18,7 @@
 #include <asm/hvcall.h>
 #include <asm/smp.h>
 
-void __spin_yield(arch_spinlock_t *lock)
+void splpar_spin_yield(arch_spinlock_t *lock)
 {
 	unsigned int lock_value, holder_cpu, yield_count;
 
@@ -36,14 +36,14 @@ void __spin_yield(arch_spinlock_t *lock)
 	plpar_hcall_norets(H_CONFER,
 		get_hard_smp_processor_id(holder_cpu), yield_count);
 }
-EXPORT_SYMBOL_GPL(__spin_yield);
+EXPORT_SYMBOL_GPL(splpar_spin_yield);
 
 /*
  * Waiting for a read lock or a write lock on a rwlock...
  * This turns out to be the same for read and write locks, since
  * we only know the holder if it is write-locked.
  */
-void __rw_yield(arch_rwlock_t *rw)
+void splpar_rw_yield(arch_rwlock_t *rw)
 {
 	int lock_value;
 	unsigned int holder_cpu, yield_count;
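
For context, the body of splpar_spin_yield() (only its tail is visible in the hunk above) decides whether yielding is worthwhile before issuing H_CONFER: it reads the lock word, takes the holder CPU from the low 16 bits, samples the holder's dispatch (yield) count, and confers only if that count is odd (the holder's vCPU is preempted) and the lock has not changed hands in the meantime. The sketch below models just that decision in user space; yield_count_of[], confer() and maybe_confer() are stand-ins, not the kernel's lppaca_of(), plpar_hcall_norets() or the real function.

#include <stdio.h>

#define NR_CPUS 8

/* Stand-in for the per-vCPU dispatch counters kept by the hypervisor
 * (the lppaca yield_count): an odd value means the vCPU is preempted. */
static unsigned int yield_count_of[NR_CPUS];

/* Stand-in for plpar_hcall_norets(H_CONFER, hw_cpu, yield_count). */
static void confer(unsigned int holder_cpu, unsigned int yield_count)
{
	printf("confer cycles to cpu %u (yield_count %u)\n",
	       holder_cpu, yield_count);
}

/* Model of the decision made by splpar_spin_yield(): the low 16 bits of
 * the lock word identify the holder CPU, and conferring only makes sense
 * while that vCPU is not running and the lock is still held by it. */
static void maybe_confer(const volatile unsigned int *lock_word)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = *lock_word;
	if (lock_value == 0)
		return;			/* lock is free, nothing to yield for */
	holder_cpu = lock_value & 0xffff;
	if (holder_cpu >= NR_CPUS)
		return;			/* corrupt lock word in this model */
	yield_count = yield_count_of[holder_cpu];
	if ((yield_count & 1) == 0)
		return;			/* holder's vCPU is running, keep spinning */
	if (*lock_word != lock_value)
		return;			/* lock changed hands, decision is stale */
	confer(holder_cpu, yield_count);
}

int main(void)
{
	volatile unsigned int lock_word = 0x80000003;	/* held, holder is cpu 3 */

	yield_count_of[3] = 41;		/* odd: cpu 3 is currently preempted */
	maybe_confer(&lock_word);
	return 0;
}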