tracing/kprobe: bpf: Compare instruction pointer with original one
Compare the instruction pointer with the original one on the stack instead
of using the per-cpu bpf_kprobe_override flag.

This patch also consolidates the reset_current_kprobe() and
preempt_enable_no_resched() blocks. Those can be done in one place.

Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent b4da3340ea
commit 66665ad2f1
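For context, the bpf_override_return() helper changed in the first hunk below is invoked from a BPF program attached to a kprobe. A minimal sketch of such a program, modeled on the samples/bpf/tracex7 style; the probe target, program name, and error value are illustrative, the kernel must be built with CONFIG_BPF_KPROBE_OVERRIDE, and the probed function must be whitelisted for error injection:

/* Illustrative sketch only; not part of this commit. */
#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <linux/version.h>
#include "bpf_helpers.h"

SEC("kprobe/open_ctree")
int override_return_example(struct pt_regs *ctx)
{
	/* Rewrite the saved instruction pointer so the probed function
	 * immediately returns -ENOMEM; kprobe_perf_func() detects the
	 * rewrite by comparing the ip with the one it saved beforehand.
	 */
	unsigned long rc = -12;	/* -ENOMEM */

	bpf_override_return(ctx, rc);
	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;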
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 {
-	__this_cpu_write(bpf_kprobe_override, 1);
 	regs_set_return_value(regs, rc);
 	arch_kprobe_override_function(regs);
 	return 0;
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -42,8 +42,6 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
 
-DEFINE_PER_CPU(int, bpf_kprobe_override);
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
 	return tk->rp.handler != NULL;
@@ -1205,6 +1203,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int rctx;
 
 	if (bpf_prog_array_valid(call)) {
+		unsigned long orig_ip = instruction_pointer(regs);
 		int ret;
 
 		ret = trace_call_bpf(call, regs);
@@ -1212,12 +1211,13 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 		/*
 		 * We need to check and see if we modified the pc of the
 		 * pt_regs, and if so clear the kprobe and return 1 so that we
-		 * don't do the instruction skipping.  Also reset our state so
-		 * we are clean the next pass through.
+		 * don't do the single stepping.
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
 		 */
-		if (__this_cpu_read(bpf_kprobe_override)) {
-			__this_cpu_write(bpf_kprobe_override, 0);
+		if (orig_ip != instruction_pointer(regs)) {
 			reset_current_kprobe();
+			preempt_enable_no_resched();
 			return 1;
 		}
 		if (!ret)
@@ -1325,15 +1325,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 	if (tk->tp.flags & TP_FLAG_TRACE)
 		kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-	if (tk->tp.flags & TP_FLAG_PROFILE) {
+	if (tk->tp.flags & TP_FLAG_PROFILE)
 		ret = kprobe_perf_func(tk, regs);
-		/*
-		 * The ftrace kprobe handler leaves it up to us to re-enable
-		 * preemption here before returning if we've modified the ip.
-		 */
-		if (ret)
-			preempt_enable_no_resched();
-	}
 #endif
 	return ret;
 }
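Read together, the two kprobe_perf_func() hunks above leave the BPF branch of that function looking roughly as follows; this is a sketch reconstructed from the hunks for readability, not code copied from the tree:

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * The BPF program may have rewritten the ip via
		 * bpf_override_return(); if so, clear the current kprobe,
		 * re-enable preemption, and return 1 to skip single stepping.
		 */
		if (orig_ip != instruction_pointer(regs)) {
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
		if (!ret)
			return 0;
	}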