sched/preempt: Rearrange a few symbols after headers merge

Adjust a few comments, and further integrate a few definitions after
the dumb headers copy.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1431441711-29753-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Frederic Weisbecker 2015-05-12 16:41:47 +02:00 committed by Ingo Molnar
parent 92cf211874
commit 2e10e71ce8

View File

@@ -9,14 +9,6 @@
 #include <linux/linkage.h>
 #include <linux/list.h>
-/*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
- */
-#define PREEMPT_NEED_RESCHED	0x80000000
-#include <asm/preempt.h>
 /*
  * We put the hardirq and softirq counter into the preemption
  * counter. The bitmask has the following meaning:
@@ -35,6 +27,7 @@
  * HARDIRQ_MASK:	0x000f0000
  * NMI_MASK:		0x00100000
  * PREEMPT_ACTIVE:	0x00200000
+ * PREEMPT_NEED_RESCHED:	0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
@@ -64,6 +57,12 @@
 #define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
 #define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
+#include <asm/preempt.h>
+
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
@@ -122,12 +121,6 @@
 #define in_atomic_preempt_off() \
 	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible()	0
-#endif
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
@@ -160,6 +153,8 @@ do { \
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -232,6 +227,7 @@ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preemptible()				0
 #endif /* CONFIG_PREEMPT_COUNT */