mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-11 12:28:41 +08:00
fprobe: Introduce FPROBE_FL_KPROBE_SHARED flag for fprobe
Introduce the FPROBE_FL_KPROBE_SHARED flag for sharing an fprobe callback with kprobes safely from the viewpoint of recursion. Since the recursion safety of the fprobe (and ftrace) is a bit different from that of kprobes, this may cause an issue if a user wants to run the same code from both the fprobe and the kprobes. Kprobes has a per-cpu 'current_kprobe' variable which protects the kprobe handler from recursion in any case. On the other hand, the fprobe uses only ftrace_test_recursion_trylock(), which will allow an interrupt context to call another (or the same) fprobe while the fprobe user handler is running. This is not a problem if the common callback shared among the kprobes and the fprobe has its own recursion detection, or if it can handle recursion in the different contexts (normal/interrupt/NMI). But if it relies on the 'current_kprobe' recursion lock, it has to check kprobe_running() and use the kprobe_busy_*() APIs. Fprobe has the FPROBE_FL_KPROBE_SHARED flag to do this. If your common callback code will be shared with kprobes, please set FPROBE_FL_KPROBE_SHARED *before* registering the fprobe, like: fprobe.flags = FPROBE_FL_KPROBE_SHARED; register_fprobe(&fprobe, "func*", NULL); This will protect your common callback from the nested call. Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org> Tested-by: Steven Rostedt (Google) <rostedt@goodmis.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Link: https://lore.kernel.org/bpf/164735293127.1084943.15687374237275817599.stgit@devnote2
This commit is contained in:
parent
6ee64cc302
commit
ab51e15d53
@ -34,13 +34,25 @@ struct fprobe {
|
||||
void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
|
||||
};
|
||||
|
||||
/* This fprobe is soft-disabled. */
|
||||
#define FPROBE_FL_DISABLED 1
|
||||
|
||||
/*
|
||||
* This fprobe handler will be shared with kprobes.
|
||||
* This flag must be set before registering.
|
||||
*/
|
||||
#define FPROBE_FL_KPROBE_SHARED 2
|
||||
|
||||
static inline bool fprobe_disabled(struct fprobe *fp)
|
||||
{
|
||||
return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
|
||||
}
|
||||
|
||||
static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
|
||||
{
|
||||
return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FPROBE
|
||||
int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
|
||||
int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
|
||||
|
@ -427,6 +427,9 @@ static inline struct kprobe *kprobe_running(void)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#define kprobe_busy_begin() do {} while (0)
|
||||
#define kprobe_busy_end() do {} while (0)
|
||||
|
||||
static inline int register_kprobe(struct kprobe *p)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
|
@ -56,6 +56,20 @@ out:
|
||||
}
|
||||
NOKPROBE_SYMBOL(fprobe_handler);
|
||||
|
||||
static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
|
||||
struct ftrace_ops *ops, struct ftrace_regs *fregs)
|
||||
{
|
||||
struct fprobe *fp = container_of(ops, struct fprobe, ops);
|
||||
|
||||
if (unlikely(kprobe_running())) {
|
||||
fp->nmissed++;
|
||||
return;
|
||||
}
|
||||
kprobe_busy_begin();
|
||||
fprobe_handler(ip, parent_ip, ops, fregs);
|
||||
kprobe_busy_end();
|
||||
}
|
||||
|
||||
static void fprobe_exit_handler(struct rethook_node *rh, void *data,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
@ -110,7 +124,10 @@ error:
|
||||
static void fprobe_init(struct fprobe *fp)
|
||||
{
|
||||
fp->nmissed = 0;
|
||||
fp->ops.func = fprobe_handler;
|
||||
if (fprobe_shared_with_kprobes(fp))
|
||||
fp->ops.func = fprobe_kprobe_handler;
|
||||
else
|
||||
fp->ops.func = fprobe_handler;
|
||||
fp->ops.flags |= FTRACE_OPS_FL_SAVE_REGS;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user