mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-02 16:44:10 +08:00
72789a4a6a
The kernel startup entry point requires the MMU and D-cache to be disabled. For kexec-reboot, take a close look at "msr sctlr_el1, x12" in __cpu_soft_restart:

1. Booted at EL1
   The instruction is enough to disable the MMU and I/D cache for the EL1 regime.

2. Booted at EL2, using VHE
   Accesses to SCTLR_EL1 are redirected to SCTLR_EL2 in EL2, so the instruction is enough to disable the MMU and clear the I+C bits for the EL2 regime.

3. Booted at EL2, not using VHE
   The instruction itself cannot affect the EL2 regime. However, the hyp-stub does not enable the MMU and I/D cache for the EL2 regime, and KVM also disables them for the EL2 regime when it is unloaded or when it executes an HVC_SOFT_RESTART call. So by the time of kexec-reboot, the code in KVM has already satisfied this requirement.

In conclusion, disabling the MMU and clearing the I+C bits in SYM_CODE_START(arm64_relocate_new_kernel) is redundant and can be removed.

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: James Morse <james.morse@arm.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Remi Denis-Courmont <remi.denis.courmont@huawei.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: kvmarm@lists.cs.columbia.edu
Link: https://lore.kernel.org/r/1598621998-20563-1-git-send-email-kernelfans@gmail.com
To: linux-arm-kernel@lists.infradead.org
Signed-off-by: Will Deacon <will@kernel.org>
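The three cases above reduce to one question for the caller of __cpu_soft_restart: is an explicit switch to EL2 (via HVC_SOFT_RESTART) needed, or is the SCTLR_EL1 write alone sufficient? Below is a minimal C sketch of that decision, using the hypothetical helper name need_el2_switch() and assuming the <asm/virt.h> predicates is_kernel_in_hyp_mode() and is_hyp_mode_available(); the actual caller in the kernel tree may express this differently.

/*
 * Sketch (not the kernel's code): map the three boot scenarios from the
 * commit message onto the el2_switch flag passed to __cpu_soft_restart.
 */
#include <asm/virt.h>

static unsigned long need_el2_switch(void)
{
	/*
	 * Case 1: booted at EL1 -- no EL2 to switch to; the SCTLR_EL1
	 *         write disables the MMU/caches for the EL1&0 regime.
	 * Case 2: booted at EL2 with VHE -- the kernel already runs at EL2
	 *         and SCTLR_EL1 accesses are redirected to SCTLR_EL2.
	 * Case 3: booted at EL2 without VHE -- EL2 exists but the kernel
	 *         runs at EL1, so the HVC_SOFT_RESTART path must be taken.
	 */
	return !is_kernel_in_hyp_mode() && is_hyp_mode_available();
}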
57 lines
1.4 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * CPU reset routines
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Huawei Futurewei Technologies.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

.text
.pushsection	.idmap.text, "awx"

/*
 * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
 * cpu_soft_restart.
 *
 * @el2_switch: Flag to indicate a switch to EL2 is needed.
 * @entry: Location to jump to for soft reset.
 * arg0: First argument passed to @entry. (relocation list)
 * arg1: Second argument passed to @entry.(physical kernel entry)
 * arg2: Third argument passed to @entry. (physical dtb address)
 *
 * Put the CPU into the same state as it would be if it had been reset, and
 * branch to what would be the reset vector. It must be executed with the
 * flat identity mapping.
 */
SYM_CODE_START(__cpu_soft_restart)
	/* Clear sctlr_el1 flags. */
	mrs	x12, sctlr_el1
	mov_q	x13, SCTLR_ELx_FLAGS
	bic	x12, x12, x13
	pre_disable_mmu_workaround
	/*
	 * either disable EL1&0 translation regime or disable EL2&0 translation
	 * regime if HCR_EL2.E2H == 1
	 */
	msr	sctlr_el1, x12
	isb

	cbz	x0, 1f				// el2_switch?
	mov	x0, #HVC_SOFT_RESTART
	hvc	#0				// no return

1:	mov	x8, x1				// entry
	mov	x0, x2				// arg0
	mov	x1, x3				// arg1
	mov	x2, x4				// arg2
	br	x8
SYM_CODE_END(__cpu_soft_restart)

.popsection
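For context on how the helper above is reached: since the routine must run from the flat identity mapping with the MMU about to be disabled, a caller typically installs the idmap and branches to the routine's physical address. The following is a minimal sketch under those assumptions, reusing the hypothetical need_el2_switch() from the earlier sketch and the kernel helpers cpu_install_idmap() and __pa_symbol(); the real wrapper in the kernel tree may differ in detail.

#include <linux/compiler.h>
#include <asm/mmu_context.h>	/* cpu_install_idmap() */
#include <asm/memory.h>		/* __pa_symbol() */

/* Prototype matching the documented argument list of __cpu_soft_restart. */
void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
			unsigned long arg0, unsigned long arg1,
			unsigned long arg2);

static inline void __noreturn cpu_soft_restart(unsigned long entry,
					       unsigned long arg0,
					       unsigned long arg1,
					       unsigned long arg2)
{
	typeof(__cpu_soft_restart) *restart;

	/* Call through the physical address, which the idmap maps 1:1. */
	restart = (void *)__pa_symbol(__cpu_soft_restart);

	cpu_install_idmap();
	restart(need_el2_switch(), entry, arg0, arg1, arg2);
	unreachable();
}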