mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-21 03:33:59 +08:00
bdd4e85dc3
Create a new CONFIG_PREEMPT_COUNT that handles the inc/dec of the preempt count offset independently, so that the offset can be updated by preempt_disable() and preempt_enable() even without CONFIG_PREEMPT being set. This prepares to make CONFIG_DEBUG_SPINLOCK_SLEEP work with !CONFIG_PREEMPT, where it currently doesn't detect code that sleeps inside explicit preemption-disabled sections. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
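A minimal sketch of the preempt.h arrangement this commit sets up, condensed for illustration (inc_preempt_count()/dec_preempt_count() are the real helpers of that era; the surrounding macro set is abbreviated here):

#ifdef CONFIG_PREEMPT_COUNT
/* The count is maintained even on !CONFIG_PREEMPT kernels, so debug
 * code such as CONFIG_DEBUG_SPINLOCK_SLEEP can observe the nesting. */
#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)
#else
/* No preempt count is kept at all. */
#define preempt_disable()		do { } while (0)
#define preempt_enable_no_resched()	do { } while (0)
#endif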
100 lines
2.2 KiB
C
#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <asm/atomic.h>

/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	__acquire(bitlock);
}

/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		return 0;
	}
#endif
	__acquire(bitlock);
	return 1;
}

/*
 * bit-based spin_unlock()
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}
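
/*
 * Illustrative sketch, not part of the original header: using bit 0 of
 * a private flags word as the lock. All names below (example_obj,
 * example_update, example_try_update) are hypothetical.
 */
struct example_obj {
	unsigned long flags;	/* bit 0 is the lock bit */
	int value;
};

static inline void example_update(struct example_obj *obj, int v)
{
	bit_spin_lock(0, &obj->flags);	/* spin until bit 0 is ours */
	obj->value = v;			/* critical section */
	bit_spin_unlock(0, &obj->flags);
}

/* Non-blocking variant: returns 0 if someone else holds the bit. */
static inline int example_try_update(struct example_obj *obj, int v)
{
	if (!bit_spin_trylock(0, &obj->flags))
		return 0;
	obj->value = v;
	bit_spin_unlock(0, &obj->flags);
	return 1;
}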

/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used e.g. if the bit lock itself is
 * protecting the rest of the flags in the word.
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	__clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	__release(bitlock);
}
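
/*
 * Illustrative sketch with hypothetical names: when the lock bit guards
 * the other bits of the same word, the holder may update them with
 * plain non-atomic bitops and then drop the lock with the cheaper
 * __bit_spin_unlock(), since no one else may touch the word meanwhile.
 */
#define EXAMPLE_LOCK_BIT	0	/* hypothetical bit layout */
#define EXAMPLE_DIRTY_BIT	1

static inline void example_mark_dirty(unsigned long *flags)
{
	bit_spin_lock(EXAMPLE_LOCK_BIT, flags);
	__set_bit(EXAMPLE_DIRTY_BIT, flags);	/* safe: word is ours */
	__bit_spin_unlock(EXAMPLE_LOCK_BIT, flags);
}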

/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	return test_bit(bitnum, addr);
#elif defined CONFIG_PREEMPT_COUNT
	return preempt_count();
#else
	return 1;
#endif
}
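
/*
 * Illustrative sketch: bit_spin_is_locked() is mainly useful for
 * assertions, e.g. to document that a helper expects the caller to
 * already hold the bit lock (the name below is hypothetical).
 */
static inline void example_assert_held(unsigned long *flags)
{
	BUG_ON(!bit_spin_is_locked(0, flags));
}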
#endif /* __LINUX_BIT_SPINLOCK_H */