mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-21 03:33:59 +08:00
f037360f2e
a) in smp_lock.h #include of sched.h and spinlock.h moved under #ifdef CONFIG_LOCK_KERNEL. b) interrupt.h now explicitly pulls sched.h (not via smp_lock.h from hardirq.h as it used to) c) in three more places we need changes to compensate for (a) - one place in arch/sparc needs string.h now, hardirq.h needs forward declaration of task_struct and preempt.h needs direct include of thread_info.h. d) thread_info-related helpers in sched.h and thread_info.h put under ifndef __HAVE_THREAD_FUNCTIONS. Obviously safe. Signed-off-by: Al Viro <viro@parcelfarce.linux.theplanet.co.uk> Signed-off-by: Roman Zippel <zippel@linux-m68k.org> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
54 lines
1.4 KiB
C
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

/*
 * Big Kernel Lock (BKL) interface.
 *
 * When CONFIG_LOCK_KERNEL is set, lock_kernel()/unlock_kernel() take and
 * release the global kernel lock; the lock is recursive per task, tracked
 * via current->lock_depth (>= 0 means "this task holds the BKL").
 * When CONFIG_LOCK_KERNEL is not set, all operations compile away.
 */

#include <linux/config.h>

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>
#include <linux/spinlock.h>

/* True iff the current task holds the BKL (lock_depth >= 0). */
#define kernel_locked()		(current->lock_depth >= 0)

extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);

/*
 * Release/re-acquire global kernel lock for the scheduler
 */
#define release_kernel_lock(tsk) do { 		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock(); 	\
} while (0)

/*
 * Non-SMP kernels will never block on the kernel lock,
 * so we are better off returning a constant zero from
 * reacquire_kernel_lock() so that the compiler can see
 * it at compile-time.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
# define return_value_on_smp return
#else
# define return_value_on_smp
#endif

/*
 * Re-take the BKL for @task if it held it before release_kernel_lock().
 * On SMP without CONFIG_PREEMPT_BKL this returns __reacquire_kernel_lock()'s
 * result (may indicate the scheduler needs to retry); elsewhere the call's
 * return value is discarded and a constant 0 is returned, letting the
 * compiler fold the whole thing away.
 */
static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return_value_on_smp __reacquire_kernel_lock();
	return 0;
}

extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);

#else

/*
 * CONFIG_LOCK_KERNEL is off: the BKL does not exist, so every operation
 * is a no-op and the "lock" is trivially always held (kernel_locked() == 1).
 */
#define lock_kernel()				do { } while(0)
#define unlock_kernel()				do { } while(0)
#define release_kernel_lock(task)		do { } while(0)
#define reacquire_kernel_lock(task)		0
#define kernel_locked()				1

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */
|