2013-07-12 08:15:49 +08:00
|
|
|
#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
|
|
|
|
#define _LINUX_CONTEXT_TRACKING_STATE_H
|
|
|
|
|
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/static_key.h>
|
|
|
|
|
|
|
|
/*
 * Per-CPU context tracking state: whether tracking is active on this
 * CPU and which context (kernel or user) the CPU was last known to be
 * executing in. One instance exists per CPU (see DECLARE_PER_CPU()
 * further down in this header).
 */
struct context_tracking {
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	/* Last context this CPU was observed in; CONTEXT_KERNEL is the default (0). */
	enum ctx_state {
		CONTEXT_KERNEL = 0,
		CONTEXT_USER,
	} state;
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_CONTEXT_TRACKING
/*
 * Static key guarding all context-tracking fast paths; defined in the
 * context tracking implementation (declared extern here).
 */
extern struct static_key context_tracking_enabled;
/* One struct context_tracking per CPU, read via __this_cpu_read() below. */
DECLARE_PER_CPU(struct context_tracking, context_tracking);
|
|
|
|
|
2013-11-06 21:45:57 +08:00
|
|
|
static inline bool context_tracking_is_enabled(void)
|
|
|
|
{
|
|
|
|
return static_key_false(&context_tracking_enabled);
|
|
|
|
}
|
2013-11-06 22:11:57 +08:00
|
|
|
|
|
|
|
static inline bool context_tracking_cpu_is_enabled(void)
|
2013-07-12 08:15:49 +08:00
|
|
|
{
|
2013-11-06 22:11:57 +08:00
|
|
|
return __this_cpu_read(context_tracking.active);
|
2013-07-12 08:15:49 +08:00
|
|
|
}
|
|
|
|
|
2013-11-06 22:11:57 +08:00
|
|
|
static inline bool context_tracking_in_user(void)
|
2013-07-12 08:15:49 +08:00
|
|
|
{
|
2015-03-05 01:06:33 +08:00
|
|
|
return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
|
2013-07-12 08:15:49 +08:00
|
|
|
}
|
|
|
|
#else
/*
 * !CONFIG_CONTEXT_TRACKING: stub every query out as constant false so
 * callers compile away with no per-CPU state or static key needed.
 *
 * NOTE(review): context_tracking_active() has no counterpart in the
 * enabled branch above — presumably a stale stub left from a rename;
 * confirm against callers before removing.
 */
static inline bool context_tracking_in_user(void) { return false; }
static inline bool context_tracking_active(void) { return false; }
static inline bool context_tracking_is_enabled(void) { return false; }
static inline bool context_tracking_cpu_is_enabled(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
|
|
|
|
|
|
|
|
#endif
|