mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-29 15:14:18 +08:00
ce5e48036c
As the documentation explains, ftrace_test_recursion_trylock() and ftrace_test_recursion_unlock() are supposed to disable and enable preemption properly; however, this work is currently done outside of these functions by their callers, which is easy to miss by mistake. And since the internal users of trace_test_and_set_recursion() and trace_clear_recursion() also require preemption to be disabled, we can simply merge the logic.

This patch makes sure preemption is disabled when trace_test_and_set_recursion() returns bit >= 0, and trace_clear_recursion() re-enables preemption if it was previously enabled.

Link: https://lkml.kernel.org/r/13bde807-779c-aa4c-0672-20515ae365ea@linux.alibaba.com

Cc: Petr Mladek <pmladek@suse.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joe Lawrence <joe.lawrence@redhat.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Jisheng Zhang <jszhang@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Miroslav Benes <mbenes@suse.cz>
Reported-by: Abaci <abaci@linux.alibaba.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Michael Wang <yun.wang@linux.alibaba.com>
[ Removed extra line in comment - SDR ]
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
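In practice, an ftrace callback now only needs the trylock/unlock pair itself; the following is a minimal sketch of the usage pattern described above (my_trace_callback() is a hypothetical callback used for illustration, not part of this commit), mirroring what the handler in the file below does:

/*
 * Sketch of the caller-side pattern described in the commit message.
 * my_trace_callback() is a hypothetical callback, for illustration only.
 */
static void my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	int bit;

	/* On success (bit >= 0), preemption is now disabled by the trylock itself. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* ... callback work runs with preemption disabled ... */

	/* Re-enables preemption if it was enabled before the trylock. */
	ftrace_test_recursion_unlock(bit);
}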
73 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>

/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	int bit;

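	/*
	 * Per the change described in the commit message, a successful
	 * ftrace_test_recursion_trylock() also disables preemption, and
	 * ftrace_test_recursion_unlock() re-enables it, so no explicit
	 * preempt_disable_notrace()/preempt_enable_notrace() pair is
	 * needed around the handler body.
	 */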
	bit = ftrace_test_recursion_trylock(nip, parent_nip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
		 */
		regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->nip)
			 * as if there is a nop
			 */
			regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
		}
		/*
		 * If pre_handler returns !0, it changes regs->nip. We have to
		 * skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	p->ainsn.boostable = -1;
	return 0;
}