linux-next/include/linux/preempt.h
Steven Rostedt 6cd8a4bb2f ftrace: trace preempt off critical timings
Add preempt off timings. A lot of kernel core code is taken from the RT patch
latency trace that was written by Ingo Molnar.

This adds "preemptoff" and "preemptirqsoff" to /debugfs/tracing/available_tracers

Now instead of just tracing irqs off, preemption off can be selected
to be recorded.

When this is selected, it shares the same files as the irqs off timings.
One can trace preemption off, irqs off, or both at once.

By echoing "preemptoff" into /debugfs/tracing/current_tracer, recording
of preempt off only is performed. "irqsoff" will only record the time
irqs are disabled, but "preemptirqsoff" will take the total time irqs
or preemption are disabled. Runtime switching of these options is now
supported by simply echoing the appropriate tracer name into
/debugfs/tracing/current_tracer.
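
For example, the switch can be done from user space by writing the tracer
name into current_tracer. A minimal, illustrative sketch in C (the
/debugfs/tracing path is taken from the text above; many systems mount
debugfs at /sys/kernel/debug instead, so the file may live there):

#include <stdio.h>

int main(void)
{
	/* "irqsoff" or "preemptirqsoff" would be written the same way. */
	FILE *f = fopen("/debugfs/tracing/current_tracer", "w");

	if (!f) {
		perror("fopen current_tracer");
		return 1;
	}
	fputs("preemptoff\n", f);
	fclose(f);
	return 0;
}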

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-23 20:32:54 +02:00


#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
  extern void add_preempt_count(int val);
  extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

#define preempt_count()	(current_thread_info()->preempt_count)
#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule(); \
} while (0)

#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)
/* For debugging and tracer internals only! */
#define add_preempt_count_notrace(val) \
	do { preempt_count() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
	do { preempt_count() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)
#else
#define preempt_disable() do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)
#define preempt_check_resched() do { } while (0)
#define preempt_disable_notrace() do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace() do { } while (0)
#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu: cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};
/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
#endif
#endif /* __LINUX_PREEMPT_H */
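
As a usage illustration of the preempt notifier API declared above (available
when CONFIG_PREEMPT_NOTIFIERS is enabled), here is a minimal sketch of how a
kernel-side user might embed a preempt_notifier in its own state and register
it for the current task. The my_ctx, my_sched_in, my_sched_out and
my_ctx_attach/my_ctx_detach names are hypothetical; the callbacks are invoked
from scheduling paths, so they must not sleep.

/* Sketch only: a hypothetical user of the preempt notifier API above. */
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/kernel.h>

struct my_ctx {
	struct preempt_notifier notifier;
	/* per-task state that wants to track scheduling events */
};

static void my_sched_in(struct preempt_notifier *notifier, int cpu)
{
	struct my_ctx *ctx = container_of(notifier, struct my_ctx, notifier);

	/* the task owning ctx is about to run again on this cpu */
	(void)ctx;
}

static void my_sched_out(struct preempt_notifier *notifier,
			 struct task_struct *next)
{
	struct my_ctx *ctx = container_of(notifier, struct my_ctx, notifier);

	/* the task owning ctx is being switched out in favour of next */
	(void)ctx;
}

static struct preempt_ops my_preempt_ops = {
	.sched_in  = my_sched_in,
	.sched_out = my_sched_out,
};

/* Called from the task whose scheduling we want to observe. */
static void my_ctx_attach(struct my_ctx *ctx)
{
	preempt_notifier_init(&ctx->notifier, &my_preempt_ops);
	preempt_notifier_register(&ctx->notifier);
}

static void my_ctx_detach(struct my_ctx *ctx)
{
	preempt_notifier_unregister(&ctx->notifier);
}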