commit 6e803e2e6e

The core ftrace code requires that when it is handed the PC of an instrumented
function, this PC is the address of the instrumented instruction. This is
necessary so that the core ftrace code can identify the specific
instrumentation site. Since the instrumentation is a BL, the address of the
instrumented instruction is LR - 4 at entry to the ftrace code.

This fixup is applied in the mcount_get_pc and mcount_get_pc0 helpers, which
acquire the PC of the instrumented function.

The mcount_get_lr helper is used to acquire the LR of the instrumented
function, whose value does not require this adjustment, and cannot be adjusted
to anything meaningful. No adjustment of this value is made on other
architectures, including arm. However, arm64 adjusts this value by 4.

This patch brings arm64 in line with other architectures and removes the
adjustment of the LR value.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Torsten Duwe <duwe@suse.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
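To illustrate the fixup (a sketch based on the -pg call sequence documented in
entry-ftrace.S below; "foo" is a hypothetical instrumented function, not part
of the patch):

    foo:
        mov     x0, x30                 // -pg prologue emitted by GCC
        bl      _mcount                 // <-- instrumentation site
        ...                             // <-- LR (x30) points here on entry
                                        //     to the ftrace code

so mcount_get_pc and mcount_get_pc0 recover the site as LR - 4, i.e.
"sub \rd, \rn, #AARCH64_INSN_SIZE". The instrumented function's own LR (the
parent's PC) identifies no instrumentation site, which is why its adjustment
is removed.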
220 lines
5.9 KiB
ArmAsm
/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

/*
 * Gcc with -pg will put the following code in the beginning of each function:
 *      mov     x0, x30
 *      bl      _mcount
 *      [function's body ...]
 * "bl _mcount" may be replaced with "bl ftrace_caller" or a NOP if dynamic
 * ftrace is enabled.
 *
 * Please note that x0 as an argument will not be used here because we can
 * get lr(x30) of the instrumented function at any time by winding up the
 * call stack as long as the kernel is compiled without -fomit-frame-pointer
 * (or CONFIG_FRAME_POINTER, which is forced on arm64).
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented => +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */
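/*
 * With the layout above, the mcount_get_* helpers below load:
 *   [x29]      - the instrumented function's fp,
 *   [x29, #8]  - _mcount()'s lr, i.e. the instrumented function's pc,
 * and, one frame up via that fp:
 *   [fp]       - the parent's fp,
 *   [fp, #8]   - the instrumented function's lr, i.e. the parent's pc.
 */
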
        /* save fp/lr and create a frame record for the ftrace entry code */
        .macro mcount_enter
        stp     x29, x30, [sp, #-16]!
        mov     x29, sp
        .endm

        /* pop the frame record and return to the instrumented function */
        .macro mcount_exit
        ldp     x29, x30, [sp], #16
        ret
        .endm

        /* step back from a return address to the bl instruction itself */
        .macro mcount_adjust_addr rd, rn
        sub     \rd, \rn, #AARCH64_INSN_SIZE
        .endm

        /* for instrumented function's parent */
        .macro mcount_get_parent_fp reg
        ldr     \reg, [x29]
        ldr     \reg, [\reg]
        .endm

        /* for instrumented function */
        .macro mcount_get_pc0 reg
        mcount_adjust_addr      \reg, x30
        .endm

        .macro mcount_get_pc reg
        ldr     \reg, [x29, #8]
        mcount_adjust_addr      \reg, \reg
        .endm

        .macro mcount_get_lr reg
        ldr     \reg, [x29]
        ldr     \reg, [\reg, #8]
        .endm

        /* address of the slot holding the instrumented function's lr */
        .macro mcount_get_lr_addr reg
        ldr     \reg, [x29]
        add     \reg, \reg, #8
        .endm

#ifndef CONFIG_DYNAMIC_FTRACE
/*
 * void _mcount(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function makes calls, if enabled, to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
ENTRY(_mcount)
        mcount_enter

        ldr_l   x2, ftrace_trace_function
        adr     x0, ftrace_stub
        cmp     x0, x2                          // if (ftrace_trace_function
        b.eq    skip_ftrace_call                //     != ftrace_stub) {

        mcount_get_pc   x0                      //       function's pc
        mcount_get_lr   x1                      //       function's lr (= parent's pc)
        blr     x2                              //   (*ftrace_trace_function)(pc, lr);

skip_ftrace_call:                               // }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        ldr_l   x2, ftrace_graph_return
        cmp     x0, x2                          //   if ((ftrace_graph_return
        b.ne    ftrace_graph_caller             //        != ftrace_stub)

        ldr_l   x2, ftrace_graph_entry          //     || (ftrace_graph_entry
        adr_l   x0, ftrace_graph_entry_stub     //         != ftrace_graph_entry_stub))
        cmp     x0, x2
        b.ne    ftrace_graph_caller             //     ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
        mcount_exit
ENDPROC(_mcount)

#else /* CONFIG_DYNAMIC_FTRACE */
/*
 * _mcount() is used to build the kernel with the -pg option, but all the
 * branch instructions to _mcount() are replaced with NOPs initially at kernel
 * start up, and later on a NOP is patched into a branch to ftrace_caller()
 * when tracing is enabled, or back into a NOP when it is disabled, on a
 * per-function basis.
 */
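/*
 * Illustration, derived from the comments above and in the file header (the
 * patching itself is done elsewhere, not in this file): an instrumented call
 * site toggles between
 *
 *      mov x0, x30 ; nop                       // tracing disabled
 *      mov x0, x30 ; bl ftrace_caller          // tracing enabled
 */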
ENTRY(_mcount)
        ret
ENDPROC(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *     - tracer function to probe instrumented function's entry,
 *     - ftrace_graph_caller to set up an exit hook
 */
ENTRY(ftrace_caller)
        mcount_enter

        mcount_get_pc0  x0                      //     function's pc
        mcount_get_lr   x1                      //     function's lr

GLOBAL(ftrace_call)                             // tracer(pc, lr);
        nop                                     // This will be replaced with "bl xxx"
                                                // where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)                       // ftrace_graph_caller();
        nop                                     // If enabled, this will be replaced
                                                // with "b ftrace_graph_caller"
#endif

        mcount_exit
ENDPROC(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */

ENTRY(ftrace_stub)
        ret
ENDPROC(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /*
         * save return value regs: x0-x7 may carry the instrumented function's
         * return value and must be preserved across the call to
         * ftrace_return_to_handler() in return_to_handler().
         */
        .macro save_return_regs
        sub sp, sp, #64
        stp x0, x1, [sp]
        stp x2, x3, [sp, #16]
        stp x4, x5, [sp, #32]
        stp x6, x7, [sp, #48]
        .endm

        /* restore return value regs */
        .macro restore_return_regs
        ldp x0, x1, [sp]
        ldp x2, x3, [sp, #16]
        ldp x4, x5, [sp, #32]
        ldp x6, x7, [sp, #48]
        add sp, sp, #64
        .endm

/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when the function_graph tracer is
 * selected.
 * This function, together with prepare_ftrace_return(), fakes the link
 * register's value on the call stack in order to intercept the instrumented
 * function's return path and run return_to_handler() later on its exit.
 */
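/*
 * Sketch of the interception (see prepare_ftrace_return() for the details):
 * the saved lr slot passed below in x0 is rewritten so that it points at
 * return_to_handler(), making the instrumented function "return" there
 * instead of to its parent.
 */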
ENTRY(ftrace_graph_caller)
        mcount_get_lr_addr        x0            //     pointer to function's saved lr
        mcount_get_pc             x1            //     function's pc
        mcount_get_parent_fp      x2            //     parent's fp
        bl      prepare_ftrace_return           // prepare_ftrace_return(&lr, pc, fp)

        mcount_exit
ENDPROC(ftrace_graph_caller)

/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
ENTRY(return_to_handler)
        save_return_regs
        mov     x0, x29                         //     parent's fp
        bl      ftrace_return_to_handler        // addr = ftrace_return_to_handler(fp);
        mov     x30, x0                         // restore the original return address
        restore_return_regs
        ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */