linux/tools/perf/arch/arm/tests/regs_load.S
Arnaldo Carvalho de Melo bd5c6b81dd perf bench: Update the copies of x86's mem{cpy,set}_64.S
And update linux/linkage.h, which in turn requires switching these files
from ENTRY()/ENDPROC() to SYM_FUNC_START()/SYM_FUNC_END():

  tools/perf/arch/arm64/tests/regs_load.S
  tools/perf/arch/arm/tests/regs_load.S
  tools/perf/arch/powerpc/tests/regs_load.S
  tools/perf/arch/x86/tests/regs_load.S
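
For instance, in tools/perf/arch/arm/tests/regs_load.S the conversion is
roughly of this shape (a sketch; the function body itself is untouched):

  -ENTRY(perf_regs_load)
  +SYM_FUNC_START(perf_regs_load)
  	str r0, [r0, #R0]
  	...
  	mov pc, lr
  -ENDPROC(perf_regs_load)
  +SYM_FUNC_END(perf_regs_load)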

We also need to switch SYM_FUNC_START_LOCAL() to SYM_FUNC_START() for
the functions used directly by 'perf bench', and update
tools/perf/check_headers.sh to ignore those changes when checking whether
the original kernel files have drifted from the copies we carry.
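
For the routines 'perf bench' calls directly, the tools/ copy stays global
where the kernel made the symbol local, e.g. (illustrative sketch, assuming
memcpy_orig is one such routine):

  /* arch/x86/lib/memcpy_64.S (kernel) */
  SYM_FUNC_START_LOCAL(memcpy_orig)

  /* tools/arch/x86/lib/memcpy_64.S (perf copy) */
  SYM_FUNC_START(memcpy_orig)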

This is to get the changes from:

  6dcc5627f6 ("x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_*")
  ef1e03152c ("x86/asm: Make some functions local")
  e9b9d020c4 ("x86/asm: Annotate aliases")

And address these tools/perf build warnings:

  Warning: Kernel ABI header at 'tools/arch/x86/lib/memcpy_64.S' differs from latest version at 'arch/x86/lib/memcpy_64.S'
  diff -u tools/arch/x86/lib/memcpy_64.S arch/x86/lib/memcpy_64.S
  Warning: Kernel ABI header at 'tools/arch/x86/lib/memset_64.S' differs from latest version at 'arch/x86/lib/memset_64.S'
  diff -u tools/arch/x86/lib/memset_64.S arch/x86/lib/memset_64.S

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-tay3l8x8k11p7y3qcpqh9qh5@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2019-12-02 11:40:57 -03:00


/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#define R0 0x00
#define R1 0x08
#define R2 0x10
#define R3 0x18
#define R4 0x20
#define R5 0x28
#define R6 0x30
#define R7 0x38
#define R8 0x40
#define R9 0x48
#define SL 0x50
#define FP 0x58
#define IP 0x60
#define SP 0x68
#define LR 0x70
#define PC 0x78
/*
 * Implementation of void perf_regs_load(u64 *regs);
 *
 * This function fills in the 'regs' buffer from the actual register values,
 * in the way the perf built-in unwinding test expects them:
 * - the PC at the time of the call to this function. Since this function
 *   is called using a bl instruction, the PC value is taken from LR.
 *   The built-in unwinding test then unwinds the call stack from the dwarf
 *   information in unwind__get_entries.
 *
 * Notes:
 * - the 8-byte stride in the register offsets comes from the fact
 *   that the registers are stored in a u64 array (u64 *regs),
 * - the regs buffer needs to be zeroed before the call to this function,
 *   in this case using a calloc in dwarf-unwind.c.
 */
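/*
 * Illustrative caller sketch (not copied verbatim from dwarf-unwind.c;
 * PERF_REGS_MAX is assumed to be the size of the regs array used there):
 *
 *	u64 *regs = calloc(1, sizeof(u64) * PERF_REGS_MAX);	// zeroed buffer
 *	if (regs)
 *		perf_regs_load(regs);	// fills the R0..PC slots defined above
 */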
.text
.type perf_regs_load,%function
SYM_FUNC_START(perf_regs_load)
	str r0, [r0, #R0]
	str r1, [r0, #R1]
	str r2, [r0, #R2]
	str r3, [r0, #R3]
	str r4, [r0, #R4]
	str r5, [r0, #R5]
	str r6, [r0, #R6]
	str r7, [r0, #R7]
	str r8, [r0, #R8]
	str r9, [r0, #R9]
	str sl, [r0, #SL]
	str fp, [r0, #FP]
	str ip, [r0, #IP]
	str sp, [r0, #SP]
	str lr, [r0, #LR]
	str lr, [r0, #PC]	// store pc as lr in order to skip the call
				// to this function
	mov pc, lr
SYM_FUNC_END(perf_regs_load)