locking/qspinlock: Revert to test-and-set on hypervisors
When we detect a hypervisor (!paravirt, see qspinlock paravirt support patches), revert to a simple test-and-set lock to avoid the horrors of queue preemption.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-8-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 2c83e8e949
commit 2aa79af642
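As an illustration of the change described above (a sketch only, not the kernel code): when the CPU reports the hypervisor feature bit, the queued-spinlock slowpath gives up on queueing and simply spins on an atomic compare-and-swap, so a preempted vCPU cannot stall a whole queue of waiters behind it. The user-space analogue below uses C11 atomics; running_on_hypervisor(), struct tas_lock, virt_spin_lock_fallback() and tas_unlock() are hypothetical names standing in for static_cpu_has(X86_FEATURE_HYPERVISOR) and the qspinlock primitives shown in the diff below.

/*
 * Illustrative user-space analogue of the fallback added by this patch.
 * C11 atomics; not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define LOCKED 1

struct tas_lock {
	atomic_int val;		/* 0 = unlocked, LOCKED = held */
};

/* Hypothetical stand-in for static_cpu_has(X86_FEATURE_HYPERVISOR). */
static bool running_on_hypervisor(void)
{
	return true;		/* pretend we are virtualized */
}

/* Returns true if the lock was taken via the simple test-and-set path. */
static bool virt_spin_lock_fallback(struct tas_lock *lock)
{
	if (!running_on_hypervisor())
		return false;	/* native hardware: use the queued slowpath */

	/* Plain test-and-set spin: no queue order for preemption to wreck. */
	for (;;) {
		int expected = 0;

		if (atomic_compare_exchange_strong(&lock->val, &expected, LOCKED))
			break;
	}
	return true;
}

static void tas_unlock(struct tas_lock *lock)
{
	/* Release store, mirroring smp_store_release() in the x86 header. */
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}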
arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #define queued_spin_unlock queued_spin_unlock
@@ -15,6 +16,19 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 	smp_store_release((u8 *)lock, 0);
 }
 
+#define virt_queued_spin_lock virt_queued_spin_lock
+
+static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
include/asm-generic/qspinlock.h
@@ -111,6 +111,13 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
 		cpu_relax();
 }
 
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
 /*
  * Initializier
  */
kernel/locking/qspinlock.c
@@ -249,6 +249,9 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (virt_queued_spin_lock(lock))
+		return;
+
 	/*
 	 * wait for in-progress pending->locked hand-overs
 	 *