2019-05-27 14:55:01 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2006-06-28 09:55:49 +08:00
|
|
|
/*
|
|
|
|
* This file contains miscellaneous low-level functions.
|
|
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
|
|
|
*
|
|
|
|
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
|
|
|
|
* and Paul Mackerras.
|
|
|
|
*
|
|
|
|
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
|
|
|
|
*
|
2008-01-18 12:50:30 +08:00
|
|
|
* setjmp/longjmp code by Paul Mackerras.
|
2006-06-28 09:55:49 +08:00
|
|
|
*/
|
|
|
|
#include <asm/ppc_asm.h>
|
2007-11-28 08:13:02 +08:00
|
|
|
#include <asm/unistd.h>
|
2008-01-18 12:50:30 +08:00
|
|
|
#include <asm/asm-compat.h>
|
|
|
|
#include <asm/asm-offsets.h>
|
2016-01-14 12:33:46 +08:00
|
|
|
#include <asm/export.h>
|
2006-06-28 09:55:49 +08:00
|
|
|
|
|
|
|
.text
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns (address we are running at) - (address we were linked at)
|
|
|
|
* for use before the text and data are mapped to KERNELBASE.
|
|
|
|
|
|
|
|
* add_reloc_offset(x) returns x + reloc_offset().
|
|
|
|
*/
|
2018-04-17 19:23:10 +08:00
|
|
|
|
|
|
|
/*
 * reloc_offset() — return (runtime address) - (link-time address).
 * add_reloc_offset(x) — return x + reloc_offset(); reloc_offset simply
 * falls through into add_reloc_offset with r3 = 0.
 *
 * In:  r3 = x (for add_reloc_offset; reloc_offset zeroes it first)
 * Out: r3 = x + relocation offset
 * Clobbers: r0, r4, r5, LR (LR is saved/restored around the bl trick)
 */
_GLOBAL(reloc_offset)
	li	r3, 0			/* reloc_offset() == add_reloc_offset(0) */
_GLOBAL(add_reloc_offset)
	mflr	r0			/* preserve caller's LR across the bl below */
	bl	1f			/* bl to next insn: LR = runtime address of 1: */
1:	mflr	r5			/* r5 = where label 1 actually is at runtime */
	PPC_LL	r4,(2f-1b)(r5)		/* r4 = link-time address of 1: (stored at 2: below) */
	subf	r5,r4,r5		/* r5 = runtime - link-time = relocation offset */
	add	r3,r3,r5		/* r3 = x + offset */
	mtlr	r0			/* restore caller's LR */
	blr
/* Runs before relocation/MMU setup is complete, so kprobes must not touch it */
_ASM_NOKPROBE_SYMBOL(reloc_offset)
_ASM_NOKPROBE_SYMBOL(add_reloc_offset)

	.align	3
	/* Link-time address of label 1:, loaded PC-relative above */
2:	PPC_LONG 1b
|
|
|
|
|
2008-01-18 12:50:30 +08:00
|
|
|
/*
 * int setjmp(long *buf)
 *
 * Save the minimal non-volatile context into buf and return 0.
 * Buffer layout (SZL-sized slots, as written below):
 *   0: LR   1: r1 (SP)   2: r2   3: CR   4..22 (64-bit): r13-r31
 * On 32-bit, slots 3+ are CR then r13-r31 via a single stmw of r12-r31
 * (r12 holds CR at that point).
 */
_GLOBAL(setjmp)
	mflr	r0
	PPC_STL	r0,0(r3)		/* slot 0: return address (LR) */
	PPC_STL	r1,SZL(r3)		/* slot 1: stack pointer */
	PPC_STL	r2,2*SZL(r3)		/* slot 2: r2 (TOC pointer on 64-bit — assumption, verify) */
#ifdef CONFIG_PPC32
	mfcr	r12			/* CR goes out via r12 so stmw can store it */
	stmw	r12, 3*SZL(r3)		/* one insn stores r12(=CR) and r13-r31 */
#else
	mfcr	r0
	PPC_STL	r0,3*SZL(r3)		/* slot 3: CR */
	/* Non-volatile GPRs, one slot each from slot 4 */
	PPC_STL	r13,4*SZL(r3)
	PPC_STL	r14,5*SZL(r3)
	PPC_STL	r15,6*SZL(r3)
	PPC_STL	r16,7*SZL(r3)
	PPC_STL	r17,8*SZL(r3)
	PPC_STL	r18,9*SZL(r3)
	PPC_STL	r19,10*SZL(r3)
	PPC_STL	r20,11*SZL(r3)
	PPC_STL	r21,12*SZL(r3)
	PPC_STL	r22,13*SZL(r3)
	PPC_STL	r23,14*SZL(r3)
	PPC_STL	r24,15*SZL(r3)
	PPC_STL	r25,16*SZL(r3)
	PPC_STL	r26,17*SZL(r3)
	PPC_STL	r27,18*SZL(r3)
	PPC_STL	r28,19*SZL(r3)
	PPC_STL	r29,20*SZL(r3)
	PPC_STL	r30,21*SZL(r3)
	PPC_STL	r31,22*SZL(r3)
#endif
	li	r3,0			/* direct call returns 0 (longjmp returns nonzero) */
	blr
|
|
|
|
|
|
|
|
/*
 * void longjmp(long *buf, int val)
 *
 * Restore the context saved by setjmp() from buf and "return" from that
 * setjmp call with value val (or 1 if val == 0 — see mr./bnelr below).
 * Restores the same slots setjmp stored: CR, r13-r31 (r14-r31 on 32-bit),
 * then LR, r1, r2.
 */
_GLOBAL(longjmp)
#ifdef CONFIG_PPC32
	lmw	r12, 3*SZL(r3)		/* reload r12(=saved CR) and r13-r31 in one insn */
	mtcrf	0x38, r12		/* restore CR fields 2-4 only (mask 0b00111000) */
#else
	/* Non-volatile GPRs back from slots 4..22 */
	PPC_LL	r13,4*SZL(r3)
	PPC_LL	r14,5*SZL(r3)
	PPC_LL	r15,6*SZL(r3)
	PPC_LL	r16,7*SZL(r3)
	PPC_LL	r17,8*SZL(r3)
	PPC_LL	r18,9*SZL(r3)
	PPC_LL	r19,10*SZL(r3)
	PPC_LL	r20,11*SZL(r3)
	PPC_LL	r21,12*SZL(r3)
	PPC_LL	r22,13*SZL(r3)
	PPC_LL	r23,14*SZL(r3)
	PPC_LL	r24,15*SZL(r3)
	PPC_LL	r25,16*SZL(r3)
	PPC_LL	r26,17*SZL(r3)
	PPC_LL	r27,18*SZL(r3)
	PPC_LL	r28,19*SZL(r3)
	PPC_LL	r29,20*SZL(r3)
	PPC_LL	r30,21*SZL(r3)
	PPC_LL	r31,22*SZL(r3)
	PPC_LL	r0,3*SZL(r3)		/* slot 3: saved CR */
	mtcrf	0x38,r0			/* restore CR fields 2-4 only (mask 0b00111000) */
#endif
	PPC_LL	r0,0(r3)		/* slot 0: saved LR */
	PPC_LL	r1,SZL(r3)		/* slot 1: saved stack pointer */
	PPC_LL	r2,2*SZL(r3)		/* slot 2: saved r2 */
	mtlr	r0			/* blr below returns to the setjmp call site */
	mr.	r3, r4			/* r3 = val, setting CR0 from it */
	bnelr				/* val != 0: return it directly */
	li	r3, 1			/* val == 0: setjmp must not appear to return 0 */
	blr
|
powerpc: Reimplement __get_SP() as a function not a define
Li Zhong points out an issue with our current __get_SP()
implementation. If ftrace function tracing is enabled (ie -pg
profiling using _mcount) we spill a stack frame on 64bit all the
time.
If a function calls __get_SP() and later calls a function that is
tail call optimised, we will pop the stack frame and the value
returned by __get_SP() is no longer valid. An example from Li can
be found in save_stack_trace -> save_context_stack:
c0000000000432c0 <.save_stack_trace>:
c0000000000432c0: mflr r0
c0000000000432c4: std r0,16(r1)
c0000000000432c8: stdu r1,-128(r1) <-- stack frame for _mcount
c0000000000432cc: std r3,112(r1)
c0000000000432d0: bl <._mcount>
c0000000000432d4: nop
c0000000000432d8: mr r4,r1 <-- __get_SP()
c0000000000432dc: ld r5,632(r13)
c0000000000432e0: ld r3,112(r1)
c0000000000432e4: li r6,1
c0000000000432e8: addi r1,r1,128 <-- pop stack frame
c0000000000432ec: ld r0,16(r1)
c0000000000432f0: mtlr r0
c0000000000432f4: b <.save_context_stack> <-- tail call optimized
save_context_stack ends up with a stack pointer below the current
one, and it is likely to be scribbled over.
Fix this by making __get_SP() a function which returns the
callers stack frame. Also replace inline assembly which grabs
the stack pointer in save_stack_trace and show_stack with
__get_SP().
This also fixes an issue with perf_arch_fetch_caller_regs().
It currently unwinds the stack once, which will skip a
valid stack frame on a leaf function. With the __get_SP() fixes
in this patch, we never need to unwind the stack frame to get
to the first interesting frame.
We have to export __get_SP() because perf_arch_fetch_caller_regs()
(which is used in modules) calls it from a header file.
Reported-by: Li Zhong <zhong@linux.vnet.ibm.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2014-10-13 16:41:38 +08:00
|
|
|
|
2020-02-20 19:51:37 +08:00
|
|
|
/*
 * unsigned long current_stack_frame(void)
 *
 * Return the caller's stack frame pointer: the word at 0(r1) is the
 * stack back-chain, i.e. the frame of whoever called us.  Implemented
 * as a real function (not inline asm) so the result stays valid even
 * when the caller's own frame is popped by tail-call optimisation.
 * Exported because it is used from header code reachable by modules.
 */
_GLOBAL(current_stack_frame)
	PPC_LL	r3,0(r1)		/* r3 = back-chain = caller's frame */
	blr
EXPORT_SYMBOL(current_stack_frame)
|