153086644f
The gcc switch -mprofile-kernel defines a new ABI for calling _mcount() very early in the function with minimal overhead.

Although mprofile-kernel has been available since GCC 3.4, there were bugs which were only fixed recently. Currently it is known to work in GCC 4.9, 5 and 6.

Additionally there are two possible code sequences generated by the flag: the first uses mflr/std/bl and the second is optimised to omit the std. Currently only gcc 6 has the optimised sequence. This patch supports both sequences.

Initial work started by Vojtech Pavlik, used with permission.

Key changes:
 - Rework _mcount() to work for both the old and new ABIs.
 - Implement new versions of ftrace_caller() and ftrace_graph_caller() which deal with the new ABI.
 - Update __ftrace_make_nop() to recognise the new mcount calling sequence.
 - Update __ftrace_make_call() to recognise the nop'ed sequence.
 - Implement ftrace_modify_call().
 - Update the module loader to suppress the TOC save in the module stub when calling mcount with the new ABI.

Reviewed-by: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Torsten Duwe <duwe@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
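For reference, the two function-entry sequences described above look roughly like this (a sketch based on the description in this commit message, not verbatim compiler output; the exact stack offset is illustrative):

    Original -mprofile-kernel sequence (GCC 4.9/5):
        mflr    r0
        std     r0, 16(r1)      # save LR before the call
        bl      _mcount

    Optimised sequence (GCC 6), with the std omitted:
        mflr    r0
        bl      _mcount

With dynamic ftrace, __ftrace_make_nop() turns the bl into a nop when tracing is disabled, and __ftrace_make_call()/ftrace_modify_call() patch the call back in when it is re-enabled.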
85 lines
2.1 KiB
C
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE

#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR		((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

#ifdef __ASSEMBLY__

/* Based off of objdump output from glibc */

#define MCOUNT_SAVE_FRAME	\
	stwu	r1,-48(r1);	\
	stw	r3, 12(r1);	\
	stw	r4, 16(r1);	\
	stw	r5, 20(r1);	\
	stw	r6, 24(r1);	\
	mflr	r3;		\
	lwz	r4, 52(r1);	\
	mfcr	r5;		\
	stw	r7, 28(r1);	\
	stw	r8, 32(r1);	\
	stw	r9, 36(r1);	\
	stw	r10,40(r1);	\
	stw	r3, 44(r1);	\
	stw	r5, 8(r1)

#define MCOUNT_RESTORE_FRAME	\
	lwz	r6, 8(r1);	\
	lwz	r0, 44(r1);	\
	lwz	r3, 12(r1);	\
	mtctr	r0;		\
	lwz	r4, 16(r1);	\
	mtcr	r6;		\
	lwz	r5, 20(r1);	\
	lwz	r6, 24(r1);	\
	lwz	r0, 52(r1);	\
	lwz	r7, 28(r1);	\
	lwz	r8, 32(r1);	\
	mtlr	r0;		\
	lwz	r9, 36(r1);	\
	lwz	r10,40(r1);	\
	addi	r1, r1, 48

#else /* !__ASSEMBLY__ */
extern void _mcount(void);

#ifdef CONFIG_DYNAMIC_FTRACE
# define FTRACE_ADDR ((unsigned long)ftrace_caller)
# define FTRACE_REGS_ADDR FTRACE_ADDR
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* relocation of mcount call site is the same as the address */
	return addr;
}

struct dyn_arch_ftrace {
	struct module *mod;
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
#if !defined(_CALL_ELF) || _CALL_ELF != 2
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Compare the symbol name with the system call name. Skip the .sys or .SyS
	 * prefix from the symbol name and the sys prefix from the system call name and
	 * just match the rest. This is only needed on ppc64 since symbol names on
	 * 32bit do not start with a period so the generic function will work.
	 */
	return !strcmp(sym + 4, name + 3);
}
#endif
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_FTRACE */
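
A side note on arch_syscall_match_sym_name() above: the +4/+3 offsets simply skip the ".sys"/".SyS" prefix on the ppc64 symbol name and the "sys" prefix on the syscall name before comparing the remainders. A minimal stand-alone sketch of that comparison (the helper name and test strings below are made up for illustration, not part of the kernel):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same comparison as arch_syscall_match_sym_name(): skip ".sys"/".SyS"
 * (4 chars) from the symbol and "sys" (3 chars) from the syscall name. */
static bool match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym + 4, name + 3);
}

int main(void)
{
	printf("%d\n", match_sym_name(".SyS_read", "sys_read"));  /* 1: "_read" == "_read" */
	printf("%d\n", match_sym_name(".sys_write", "sys_read")); /* 0: "_write" != "_read" */
	return 0;
}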