mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-21 11:44:01 +08:00
8592e6486a
498657a478
incorrectly assumed
that preempt wasn't disabled around context_switch() and thus
was fixing imaginary problem. It also broke KVM because it
depended on ->sched_in() to be called with irq enabled so that
it can do smp calls from there.
Revert the incorrect commit and add a comment describing the different
contexts under which the two callbacks are invoked.
Avi: spotted transposed in/out in the added comment.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Avi Kivity <avi@redhat.com>
Cc: peterz@infradead.org
Cc: efault@gmx.de
Cc: rusty@rustcorp.com.au
LKML-Reference: <1259726212-30259-2-git-send-email-tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
145 lines
3.8 KiB
C
145 lines
3.8 KiB
C
#ifndef __LINUX_PREEMPT_H
|
|
#define __LINUX_PREEMPT_H
|
|
|
|
/*
|
|
* include/linux/preempt.h - macros for accessing and manipulating
|
|
* preempt_count (used for kernel preemption, interrupt count, etc.)
|
|
*/
|
|
|
|
#include <linux/thread_info.h>
|
|
#include <linux/linkage.h>
|
|
#include <linux/list.h>
|
|
|
|
/*
 * add_preempt_count()/sub_preempt_count() adjust the preempt count by an
 * arbitrary amount.  With preempt debugging or the preempt tracer enabled
 * they are real out-of-line functions (so they can be checked/traced);
 * otherwise they collapse to plain arithmetic on the per-thread counter.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void add_preempt_count(int val);
extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
#endif

/* Convenience helpers for the common +1/-1 adjustment. */
#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

/* The preempt counter lives in the current task's thread_info. */
#define preempt_count() (current_thread_info()->preempt_count)
#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

/*
 * Forbid preemption by bumping the preempt count.  The barrier() keeps
 * the compiler from hoisting protected accesses out of the critical
 * section.
 */
#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

/* Drop the count WITHOUT checking for a pending reschedule. */
#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

/* Reschedule now if need-resched was set while preemption was off. */
#define preempt_check_resched() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule(); \
} while (0)

/* Re-allow preemption and immediately honor a pending reschedule. */
#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

/*
 * For debugging and tracer internals only!  The _notrace variants
 * adjust the count directly and therefore never recurse into the
 * function tracer.
 */
#define add_preempt_count_notrace(val) \
	do { preempt_count() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
	do { preempt_count() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

/* As preempt_disable(), but invisible to the tracer. */
#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

/* As preempt_enable_no_resched(), but invisible to the tracer. */
#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

#else

/* !CONFIG_PREEMPT: all of these compile away to nothing. */
#define preempt_disable() do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)

#define preempt_disable_notrace() do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace() do { } while (0)

#endif
|
|
|
|
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
|
|
|
|
/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;	/* chained off the owning task */
	struct preempt_ops *ops;
};
|
|
|
|
/* Attach/detach a (previously initialized) notifier to/from current. */
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
|
|
|
|
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
|
|
struct preempt_ops *ops)
|
|
{
|
|
INIT_HLIST_NODE(¬ifier->link);
|
|
notifier->ops = ops;
|
|
}
|
|
|
|
#endif
|
|
|
|
#endif /* __LINUX_PREEMPT_H */
|