/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Linker script variables to be set after section resolution, as
 * ld.lld does not like variables assigned before SECTIONS is processed.
 */
#ifndef __ARM64_KERNEL_IMAGE_VARS_H
#define __ARM64_KERNEL_IMAGE_VARS_H

#ifndef LINKER_SCRIPT
#error This file should only be included in vmlinux.lds.S
#endif

#ifdef CONFIG_EFI

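/*
 * These describe the loaded image to the EFI stub: its size (text + data,
 * excluding bss) and the offset of the bare-metal entry point from the
 * image base.
 */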
__efistub_kernel_size = _edata - _text;
__efistub_primary_entry_offset = primary_entry - _text;
/*
 * The EFI stub has its own symbol namespace prefixed by __efistub_, to
 * isolate it from the kernel proper. The following symbols are legally
 * accessed by the stub, so provide some aliases to make them accessible.
 * Only include data symbols here, or text symbols of functions that are
 * guaranteed to be safe when executed at another offset than they were
 * linked at. The routines below are all implemented in assembler in a
 * position independent manner.
 */
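/*
 * E.g. a memcpy() reference in stub code is renamed to __efistub_memcpy
 * when the stub objects are built, and thus binds to __pi_memcpy here.
 */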
__efistub_memcmp = __pi_memcmp;
__efistub_memchr = __pi_memchr;
__efistub_memcpy = __pi_memcpy;
__efistub_memmove = __pi_memmove;
__efistub_memset = __pi_memset;
__efistub_strlen = __pi_strlen;
__efistub_strnlen = __pi_strnlen;
__efistub_strcmp = __pi_strcmp;
__efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
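/*
 * Cache maintenance to the point of coherency, needed by the stub before
 * it branches to code that runs with the MMU and caches off.
 */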
__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
#ifdef CONFIG_KASAN
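/*
 * KASAN builds redirect memcpy/memmove/memset to these __-prefixed,
 * uninstrumented variants, so alias them to the PIC routines as well.
 */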
__efistub___memcpy = __pi_memcpy;
__efistub___memmove = __pi_memmove;
__efistub___memset = __pi_memset;
#endif
__efistub__text = _text;
__efistub__end = _end;
__efistub__edata = _edata;
__efistub_screen_info = screen_info;
__efistub__ctype = _ctype;
#endif
#ifdef CONFIG_KVM
/*
 * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_, to
 * separate it from the kernel proper. The following symbols are legally
 * accessed by it, therefore provide aliases to make them linkable.
 * Do not include symbols which may not be safely accessed under hypervisor
 * memory mappings.
 */

#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
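/* E.g. KVM_NVHE_ALIAS(panic) expands to: __kvm_nvhe_panic = panic; */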
/* Symbols defined in debug-sr.c (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__kvm_get_mdcr_el2);
/* Symbols defined in entry.S (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__guest_exit);
KVM_NVHE_ALIAS(abort_guest_exit_end);
KVM_NVHE_ALIAS(abort_guest_exit_start);
/* Symbols defined in switch.c (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__kvm_vcpu_run_nvhe);
KVM_NVHE_ALIAS(hyp_panic);
/* Symbols defined in sysreg-sr.c (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__kvm_enable_ssbs);
/* Symbols defined in timer-sr.c (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__kvm_timer_set_cntvoff);
/* Symbols defined in vgic-v3-sr.c (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__vgic_v3_get_ich_vtr_el2);
KVM_NVHE_ALIAS(__vgic_v3_init_lrs);
KVM_NVHE_ALIAS(__vgic_v3_read_vmcr);
KVM_NVHE_ALIAS(__vgic_v3_restore_aprs);
KVM_NVHE_ALIAS(__vgic_v3_save_aprs);
KVM_NVHE_ALIAS(__vgic_v3_write_vmcr);
/* Alternative callbacks for init-time patching of nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
KVM_NVHE_ALIAS(kvm_host_data);
/* Kernel constant needed to compute idmap addresses. */
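/* (For kernel image symbols: pa = va - kimage_voffset.) */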
KVM_NVHE_ALIAS(kimage_voffset);
/* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */
KVM_NVHE_ALIAS(panic);
/* Vectors installed by hyp-init on reset HVC. */
KVM_NVHE_ALIAS(__hyp_stub_vectors);
/* IDMAP TCR_EL1.T0SZ as computed by the EL1 init code */
KVM_NVHE_ALIAS(idmap_t0sz);
/* Kernel symbol used by icache_is_vpipt(). */
KVM_NVHE_ALIAS(__icache_flags);
/* Kernel symbols needed for cpus_have_final/const_caps checks. */
KVM_NVHE_ALIAS(arm64_const_caps_ready);
KVM_NVHE_ALIAS(cpu_hwcap_keys);
KVM_NVHE_ALIAS(cpu_hwcaps);
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */