linux/arch/arm64/kernel/efi-rt-wrapper.S
Will Deacon 082af5ec50 Merge branch 'for-next/scs' into for-next/core
Support for Clang's Shadow Call Stack in the kernel
(Sami Tolvanen and Will Deacon)
* for-next/scs:
  arm64: entry-ftrace.S: Update comment to indicate that x18 is live
  scs: Move DEFINE_SCS macro into core code
  scs: Remove references to asm/scs.h from core code
  scs: Move scs_overflow_check() out of architecture code
  arm64: scs: Use 'scs_sp' register alias for x18
  scs: Move accounting into alloc/free functions
  arm64: scs: Store absolute SCS stack pointer value in thread_info
  efi/libstub: Disable Shadow Call Stack
  arm64: scs: Add shadow stacks for SDEI
  arm64: Implement Shadow Call Stack
  arm64: Disable SCS for hypervisor code
  arm64: vdso: Disable Shadow Call Stack
  arm64: efi: Restore register x18 if it was corrupted
  arm64: Preserve register x18 when CPU is suspended
  arm64: Reserve register x18 from general allocation with SCS
  scs: Disable when function graph tracing is enabled
  scs: Add support for stack usage debugging
  scs: Add page accounting for shadow call stack allocations
  scs: Add support for Clang's Shadow Call Stack (SCS)
2020-05-28 18:03:40 +01:00

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>

SYM_FUNC_START(__efi_rt_asm_wrapper)
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp

	/*
	 * Register x18 is designated as the 'platform' register by the AAPCS,
	 * which means firmware running at the same exception level as the OS
	 * (such as UEFI) should never touch it.
	 */
	stp	x1, x18, [sp, #16]

	/*
	 * We are lucky enough that no EFI runtime services take more than
	 * 5 arguments, so all are passed in registers rather than via the
	 * stack.
	 */
	mov	x8, x0
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	mov	x3, x5
	mov	x4, x6
	blr	x8

	ldp	x1, x2, [sp, #16]
	cmp	x2, x18
	ldp	x29, x30, [sp], #32
	b.ne	0f
	ret
0:
	/*
	 * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a
	 * shadow stack pointer, which we need to restore before returning to
	 * potentially instrumented code. This is safe because the wrapper is
	 * called with preemption disabled and a separate shadow stack is used
	 * for interrupts.
	 */
	mov	x18, x2
	b	efi_handle_corrupted_x18	// tail call
SYM_FUNC_END(__efi_rt_asm_wrapper)
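
For context, a rough sketch of the C side that pairs with this wrapper. The names and signatures below are approximated from arch/arm64/include/asm/efi.h and arch/arm64/kernel/efi.c around this merge rather than copied verbatim, so check the tree for the exact definitions. Callers reach the wrapper through the arch_efi_call_virt() macro, which puts the runtime service pointer in x0 and the stringified service name in x1; that is why the assembly above saves x1 alongside x18 and shifts the real EFI arguments down from x2-x6 before the indirect call. efi_handle_corrupted_x18() is the tail-call target taken when x18 comes back modified: it logs the firmware bug, naming the offending service, and passes the service's status code straight through.

/* Sketch only -- approximated C counterparts, not a verbatim copy of the tree. */
#include <linux/efi.h>
#include <linux/linkage.h>
#include <linux/printk.h>

efi_status_t __efi_rt_asm_wrapper(void *fn, const char *name, ...);

/*
 * Roughly how callers invoke the wrapper: x0 = runtime service pointer,
 * x1 = service name string, x2.. = the actual EFI arguments.
 */
#define arch_efi_call_virt(p, f, args...)				\
	__efi_rt_asm_wrapper((p)->f, #f, args)

/*
 * Tail-call target for the corrupted-x18 path: x0 still holds the status
 * returned by the service and x1 was reloaded with the saved name string,
 * so report the firmware bug (rate limited) and return the status unchanged
 * so the caller still sees the real result.
 */
asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
{
	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
	return s;
}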