mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 20:54:10 +08:00
ee556d00cf
When function graph tracer is enabled, the following operation will trigger panic: mount -t debugfs nodev /sys/kernel echo next_tgid > /sys/kernel/tracing/set_ftrace_filter echo function_graph > /sys/kernel/tracing/current_tracer ls /proc/ ------------[ cut here ]------------ [ 198.501417] Unable to handle kernel paging request at virtual address cb88537fdc8ba316 [ 198.506126] pgd = ffffffc008f79000 [ 198.509363] [cb88537fdc8ba316] *pgd=00000000488c6003, *pud=00000000488c6003, *pmd=0000000000000000 [ 198.517726] Internal error: Oops: 94000005 [#1] SMP [ 198.518798] Modules linked in: [ 198.520582] CPU: 1 PID: 1388 Comm: ls Tainted: G [ 198.521800] Hardware name: linux,dummy-virt (DT) [ 198.522852] task: ffffffc0fa9e8000 ti: ffffffc0f9ab0000 task.ti: ffffffc0f9ab0000 [ 198.524306] PC is at next_tgid+0x30/0x100 [ 198.525205] LR is at return_to_handler+0x0/0x20 [ 198.526090] pc : [<ffffffc0002a1070>] lr : [<ffffffc0000907c0>] pstate: 60000145 [ 198.527392] sp : ffffffc0f9ab3d40 [ 198.528084] x29: ffffffc0f9ab3d40 x28: ffffffc0f9ab0000 [ 198.529406] x27: ffffffc000d6a000 x26: ffffffc000b786e8 [ 198.530659] x25: ffffffc0002a1900 x24: ffffffc0faf16c00 [ 198.531942] x23: ffffffc0f9ab3ea0 x22: 0000000000000002 [ 198.533202] x21: ffffffc000d85050 x20: 0000000000000002 [ 198.534446] x19: 0000000000000002 x18: 0000000000000000 [ 198.535719] x17: 000000000049fa08 x16: ffffffc000242efc [ 198.537030] x15: 0000007fa472b54c x14: ffffffffff000000 [ 198.538347] x13: ffffffc0fada84a0 x12: 0000000000000001 [ 198.539634] x11: ffffffc0f9ab3d70 x10: ffffffc0f9ab3d70 [ 198.540915] x9 : ffffffc0000907c0 x8 : ffffffc0f9ab3d40 [ 198.542215] x7 : 0000002e330f08f0 x6 : 0000000000000015 [ 198.543508] x5 : 0000000000000f08 x4 : ffffffc0f9835ec0 [ 198.544792] x3 : cb88537fdc8ba316 x2 : cb88537fdc8ba306 [ 198.546108] x1 : 0000000000000002 x0 : ffffffc000d85050 [ 198.547432] [ 198.547920] Process ls (pid: 1388, stack limit = 0xffffffc0f9ab0020) [ 198.549170] Stack: (0xffffffc0f9ab3d40 to 
0xffffffc0f9ab4000) [ 198.582568] Call trace: [ 198.583313] [<ffffffc0002a1070>] next_tgid+0x30/0x100 [ 198.584359] [<ffffffc0000907bc>] ftrace_graph_caller+0x6c/0x70 [ 198.585503] [<ffffffc0000907bc>] ftrace_graph_caller+0x6c/0x70 [ 198.586574] [<ffffffc0000907bc>] ftrace_graph_caller+0x6c/0x70 [ 198.587660] [<ffffffc0000907bc>] ftrace_graph_caller+0x6c/0x70 [ 198.588896] Code: aa0003f5 2a0103f4 b4000102 91004043 (885f7c60) [ 198.591092] ---[ end trace 6a346f8f20949ac8 ]--- This is because when using function graph tracer, if the traced function return value is in multi regs ([x0-x7]), return_to_handler may corrupt them. So in return_to_handler, the parameter regs should be protected properly. Cc: <stable@vger.kernel.org> # 3.18+ Signed-off-by: Li Bin <huawei.libin@huawei.com> Acked-by: AKASHI Takahiro <takahiro.akashi@linaro.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
233 lines
6.2 KiB
ArmAsm
233 lines
6.2 KiB
ArmAsm
/*
|
|
* arch/arm64/kernel/entry-ftrace.S
|
|
*
|
|
* Copyright (C) 2013 Linaro Limited
|
|
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/ftrace.h>
|
|
#include <asm/insn.h>
|
|
|
|
/*
|
|
* Gcc with -pg will put the following code in the beginning of each function:
|
|
* mov x0, x30
|
|
* bl _mcount
|
|
* [function's body ...]
|
|
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
|
|
* ftrace is enabled.
|
|
*
|
|
* Please note that x0 as an argument will not be used here because we can
|
|
* get lr(x30) of instrumented function at any time by winding up call stack
|
|
* as long as the kernel is compiled without -fomit-frame-pointer.
|
|
* (or CONFIG_FRAME_POINTER, this is forced on arm64)
|
|
*
|
|
* stack layout after mcount_enter in _mcount():
|
|
*
|
|
* current sp/fp => 0:+-----+
|
|
* in _mcount() | x29 | -> instrumented function's fp
|
|
* +-----+
|
|
* | x30 | -> _mcount()'s lr (= instrumented function's pc)
|
|
* old sp => +16:+-----+
|
|
* when instrumented | |
|
|
* function calls | ... |
|
|
* _mcount() | |
|
|
* | |
|
|
* instrumented => +xx:+-----+
|
|
* function's fp | x29 | -> parent's fp
|
|
* +-----+
|
|
* | x30 | -> instrumented function's lr (= parent's pc)
|
|
* +-----+
|
|
* | ... |
|
|
*/
|
|
|
|
.macro mcount_enter
|
|
stp x29, x30, [sp, #-16]!
|
|
mov x29, sp
|
|
.endm
|
|
|
|
.macro mcount_exit
|
|
ldp x29, x30, [sp], #16
|
|
ret
|
|
.endm
|
|
|
|
.macro mcount_adjust_addr rd, rn
|
|
sub \rd, \rn, #AARCH64_INSN_SIZE
|
|
.endm
|
|
|
|
/* for instrumented function's parent */
|
|
.macro mcount_get_parent_fp reg
|
|
ldr \reg, [x29]
|
|
ldr \reg, [\reg]
|
|
.endm
|
|
|
|
/* for instrumented function */
|
|
.macro mcount_get_pc0 reg
|
|
mcount_adjust_addr \reg, x30
|
|
.endm
|
|
|
|
.macro mcount_get_pc reg
|
|
ldr \reg, [x29, #8]
|
|
mcount_adjust_addr \reg, \reg
|
|
.endm
|
|
|
|
.macro mcount_get_lr reg
|
|
ldr \reg, [x29]
|
|
ldr \reg, [\reg, #8]
|
|
mcount_adjust_addr \reg, \reg
|
|
.endm
|
|
|
|
.macro mcount_get_lr_addr reg
|
|
ldr \reg, [x29]
|
|
add \reg, \reg, #8
|
|
.endm
|
|
|
|
#ifndef CONFIG_DYNAMIC_FTRACE
|
|
/*
|
|
* void _mcount(unsigned long return_address)
|
|
* @return_address: return address to instrumented function
|
|
*
|
|
* This function makes calls, if enabled, to:
|
|
* - tracer function to probe instrumented function's entry,
|
|
* - ftrace_graph_caller to set up an exit hook
|
|
*/
|
|
ENTRY(_mcount)
|
|
mcount_enter
|
|
|
|
adrp x0, ftrace_trace_function
|
|
ldr x2, [x0, #:lo12:ftrace_trace_function]
|
|
adr x0, ftrace_stub
|
|
cmp x0, x2 // if (ftrace_trace_function
|
|
b.eq skip_ftrace_call // != ftrace_stub) {
|
|
|
|
mcount_get_pc x0 // function's pc
|
|
mcount_get_lr x1 // function's lr (= parent's pc)
|
|
blr x2 // (*ftrace_trace_function)(pc, lr);
|
|
|
|
#ifndef CONFIG_FUNCTION_GRAPH_TRACER
|
|
skip_ftrace_call: // return;
|
|
mcount_exit // }
|
|
#else
|
|
mcount_exit // return;
|
|
// }
|
|
skip_ftrace_call:
|
|
adrp x1, ftrace_graph_return
|
|
ldr x2, [x1, #:lo12:ftrace_graph_return]
|
|
cmp x0, x2 // if ((ftrace_graph_return
|
|
b.ne ftrace_graph_caller // != ftrace_stub)
|
|
|
|
adrp x1, ftrace_graph_entry // || (ftrace_graph_entry
|
|
adrp x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
|
|
ldr x2, [x1, #:lo12:ftrace_graph_entry]
|
|
add x0, x0, #:lo12:ftrace_graph_entry_stub
|
|
cmp x0, x2
|
|
b.ne ftrace_graph_caller // ftrace_graph_caller();
|
|
|
|
mcount_exit
|
|
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
|
ENDPROC(_mcount)
|
|
|
|
#else /* CONFIG_DYNAMIC_FTRACE */
|
|
/*
|
|
* _mcount() is used to build the kernel with -pg option, but all the branch
|
|
* instructions to _mcount() are replaced to NOP initially at kernel start up,
|
|
* and later on, NOP to branch to ftrace_caller() when enabled or branch to
|
|
* NOP when disabled per-function base.
|
|
*/
|
|
ENTRY(_mcount)
|
|
ret
|
|
ENDPROC(_mcount)
|
|
|
|
/*
|
|
* void ftrace_caller(unsigned long return_address)
|
|
* @return_address: return address to instrumented function
|
|
*
|
|
* This function is a counterpart of _mcount() in 'static' ftrace, and
|
|
* makes calls to:
|
|
* - tracer function to probe instrumented function's entry,
|
|
* - ftrace_graph_caller to set up an exit hook
|
|
*/
|
|
ENTRY(ftrace_caller)
|
|
mcount_enter
|
|
|
|
mcount_get_pc0 x0 // function's pc
|
|
mcount_get_lr x1 // function's lr
|
|
|
|
.global ftrace_call
|
|
ftrace_call: // tracer(pc, lr);
|
|
nop // This will be replaced with "bl xxx"
|
|
// where xxx can be any kind of tracer.
|
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
.global ftrace_graph_call
|
|
ftrace_graph_call: // ftrace_graph_caller();
|
|
nop // If enabled, this will be replaced
|
|
// "b ftrace_graph_caller"
|
|
#endif
|
|
|
|
mcount_exit
|
|
ENDPROC(ftrace_caller)
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
|
|
ENTRY(ftrace_stub)
|
|
ret
|
|
ENDPROC(ftrace_stub)
|
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
/* save return value regs*/
|
|
.macro save_return_regs
|
|
sub sp, sp, #64
|
|
stp x0, x1, [sp]
|
|
stp x2, x3, [sp, #16]
|
|
stp x4, x5, [sp, #32]
|
|
stp x6, x7, [sp, #48]
|
|
.endm
|
|
|
|
/* restore return value regs*/
|
|
.macro restore_return_regs
|
|
ldp x0, x1, [sp]
|
|
ldp x2, x3, [sp, #16]
|
|
ldp x4, x5, [sp, #32]
|
|
ldp x6, x7, [sp, #48]
|
|
add sp, sp, #64
|
|
.endm
|
|
|
|
/*
|
|
* void ftrace_graph_caller(void)
|
|
*
|
|
* Called from _mcount() or ftrace_caller() when function_graph tracer is
|
|
* selected.
|
|
* This function w/ prepare_ftrace_return() fakes link register's value on
|
|
* the call stack in order to intercept instrumented function's return path
|
|
* and run return_to_handler() later on its exit.
|
|
*/
|
|
ENTRY(ftrace_graph_caller)
|
|
mcount_get_lr_addr x0 // pointer to function's saved lr
|
|
mcount_get_pc x1 // function's pc
|
|
mcount_get_parent_fp x2 // parent's fp
|
|
bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
|
|
|
|
mcount_exit
|
|
ENDPROC(ftrace_graph_caller)
|
|
|
|
/*
|
|
* void return_to_handler(void)
|
|
*
|
|
* Run ftrace_return_to_handler() before going back to parent.
|
|
* @fp is checked against the value passed by ftrace_graph_caller()
|
|
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
|
|
*/
|
|
ENTRY(return_to_handler)
|
|
save_return_regs
|
|
mov x0, x29 // parent's fp
|
|
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
|
|
mov x30, x0 // restore the original return address
|
|
restore_return_regs
|
|
ret
|
|
END(return_to_handler)
|
|
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|