commit 9ea4c38006
Currently all _bh_ lock functions do two preempt_count operations:

    local_bh_disable();
    preempt_disable();

and for the unlock:

    preempt_enable_no_resched();
    local_bh_enable();

Since it's a waste of perfectly good cycles to modify the same variable
twice when you can do it in one go, use the new
__local_bh_{dis,en}able_ip() functions that allow us to provide a
preempt_count value to add/sub.

So define SOFTIRQ_LOCK_OFFSET as the offset a _bh_ lock needs to
add/sub to be done in one go.

As a bonus it gets rid of the preempt_enable_no_resched() usage.

This reduces 1000 loops of:

    spin_lock_bh(&bh_lock);
    spin_unlock_bh(&bh_lock);

from 53596 cycles to 51995 cycles. I didn't do enough measurements to
say for absolute sure that the result is significant, but the few runs
I did for each suggest it is so.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: jacob.jun.pan@linux.intel.com
Cc: Mike Galbraith <bitbucket@online.de>
Cc: hpa@zytor.com
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: lenb@kernel.org
Cc: rjw@rjwysocki.net
Cc: rui.zhang@intel.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20131119151338.GF3694@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
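For orientation before the header itself: the change is purely preempt_count
arithmetic. SOFTIRQ_LOCK_OFFSET is sized to cover both the softirq-disable
amount and the preempt-disable amount, so __local_bh_disable_ip() can apply
both in a single add. The sketch below is illustrative only, not the actual
patch; old_spin_lock_bh() and new_spin_lock_bh() are hypothetical names, and
lockdep annotations are omitted for brevity.

/*
 * Illustrative sketch: before/after preempt_count updates on the
 * lock side of a _bh_ spinlock (lockdep annotations omitted).
 */

/* Before: two separate preempt_count operations. */
static inline void old_spin_lock_bh(raw_spinlock_t *lock)
{
        local_bh_disable();     /* preempt_count += SOFTIRQ_DISABLE_OFFSET */
        preempt_disable();      /* preempt_count += PREEMPT_OFFSET (1) */
        do_raw_spin_lock(lock);
}

/* After: one combined preempt_count operation. */
static inline void new_spin_lock_bh(raw_spinlock_t *lock)
{
        /* SOFTIRQ_LOCK_OFFSET covers softirq-disable + preempt-disable */
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        do_raw_spin_lock(lock);
}

The unlock side is symmetric: __raw_spin_unlock_bh() below drops the whole
SOFTIRQ_LOCK_OFFSET in one __local_bh_enable_ip() call, which also removes the
old preempt_enable_no_resched() step.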
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                        __acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                        __acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                        __acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                        __acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
                                                        __acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
                                                        __releases(lock);

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        preempt_disable();
        if (do_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        preempt_enable();
        return 0;
}

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /*
         * On lockdep we dont want the hand-coded irq-enable of
         * do_raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
        do_raw_spin_lock_flags(lock, &flags);
#endif
        return flags;
}

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
                                                unsigned long flags)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        local_irq_enable();
        preempt_enable();
}

static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        if (do_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        return 0;
}

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */