mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-19 02:04:19 +08:00
c612505f86
If memory is located above 1<<VA_BITS, kvm adds an extra level to its
page tables, merging the runtime tables and boot tables that contain the
idmap. This lets us avoid the trampoline dance during initialisation.

This also means there is no trampoline page mapped, so
__cpu_reset_hyp_mode() can't call __kvm_hyp_reset() in this page. The
good news is the idmap is still mapped, so we don't need the trampoline
page. The bad news is we can't call it directly as the idmap is above
HYP_PAGE_OFFSET, so its address is masked by kvm_call_hyp.

Add a function __extended_idmap_trampoline which will branch into
__kvm_hyp_reset in the idmap, change kvm_hyp_reset_entry() to return
this address if __kvm_cpu_uses_extended_idmap(). In this case
__kvm_hyp_reset() will still switch to the boot tables (which are the
merged tables that were already in use), and branch into the idmap
(where it already was).

This fixes boot failures on these systems, where we fail to execute the
missing trampoline page when tearing down kvm in init_subsystems():

[    2.508922] kvm [1]: 8-bit VMID
[    2.512057] kvm [1]: Hyp mode initialized successfully
[    2.517242] kvm [1]: interrupt-controller@e1140000 IRQ13
[    2.522622] kvm [1]: timer IRQ3
[    2.525783] Kernel panic - not syncing: HYP panic:
[    2.525783] PS:200003c9 PC:0000007ffffff820 ESR:86000005
[    2.525783] FAR:0000007ffffff820 HPFAR:00000000003ffff0 PAR:0000000000000000
[    2.525783] VCPU: (null)
[    2.525783]
[    2.547667] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G W 4.6.0-rc5+ #1
[    2.555137] Hardware name: Default string Default string/Default string, BIOS ROD0084E 09/03/2015
[    2.563994] Call trace:
[    2.566432] [<ffffff80080888d0>] dump_backtrace+0x0/0x240
[    2.571818] [<ffffff8008088b24>] show_stack+0x14/0x20
[    2.576858] [<ffffff80083423ac>] dump_stack+0x94/0xb8
[    2.581899] [<ffffff8008152130>] panic+0x10c/0x250
[    2.586677] [<ffffff8008152024>] panic+0x0/0x250
[    2.591281] SMP: stopping secondary CPUs
[    3.649692] SMP: failed to stop secondary CPUs 0-2,4-7
[    3.654818] Kernel Offset: disabled
[    3.658293] Memory Limit: none
[    3.661337] ---[ end Kernel panic - not syncing: HYP panic:
[    3.661337] PS:200003c9 PC:0000007ffffff820 ESR:86000005
[    3.661337] FAR:0000007ffffff820 HPFAR:00000000003ffff0 PAR:0000000000000000
[    3.661337] VCPU: (null)

Reported-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
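As a hedged C sketch (not the literal diff), the selection added to
kvm_hyp_reset_entry() comes down to the following. Only the symbol names
are taken from the commit message; the TRAMPOLINE_VA/PAGE_MASK arithmetic
in the fallback path is an assumption about that era's arm64 KVM headers:

/* Sketch only: the two reset symbols, as referenced above. */
extern char __kvm_hyp_reset[];
extern char __extended_idmap_trampoline[];

static inline unsigned long kvm_hyp_reset_entry(void)
{
	if (__kvm_cpu_uses_extended_idmap()) {
		/*
		 * No trampoline page is mapped, but the idmap is;
		 * __extended_idmap_trampoline branches into
		 * __kvm_hyp_reset there for us.
		 */
		return (unsigned long)__extended_idmap_trampoline;
	}

	/*
	 * Trampoline page is mapped: return __kvm_hyp_reset's alias
	 * inside it (its offset within the page, rebased onto
	 * TRAMPOLINE_VA; assumed arithmetic).
	 */
	return TRAMPOLINE_VA + ((unsigned long)__kvm_hyp_reset & ~PAGE_MASK);
}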
186 lines
4.8 KiB
ArmAsm
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"

.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
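/*
 * Usage sketch (an assumption about the C-side caller, not taken from
 * this file): the world-switch code calls this as roughly
 *	exit_code = __guest_enter(vcpu, host_ctxt);
 * and gets back whatever exit code __guest_exit was handed in x1.
 */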
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host/guest context
	// x2-x18: clobbered by macros

	// Store the host regs
	save_callee_saved_regs x1

	// Preserve vcpu & host_ctxt for use at exit time
	stp	x0, x1, [sp, #-16]!

	add	x1, x0, #VCPU_CONTEXT

	// Prepare x0-x1 for later restore by pushing them onto the stack
	ldp	x2, x3, [x1, #CPU_XREG_OFFSET(0)]
	stp	x2, x3, [sp, #-16]!

	// x2-x18
	ldp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	ldr	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// x19-x29, lr
	restore_callee_saved_regs x1

	// Last bits of the 64bit state
	ldp	x0, x1, [sp], #16

	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: vcpu
	// x1: return code
	// x2-x3: free
	// x4-x29,lr: vcpu regs
	// vcpu x0-x3 on the stack

	add	x2, x0, #VCPU_CONTEXT

	stp	x4, x5,   [x2, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x2, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x2, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x2, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x2, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x2, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x2, #CPU_XREG_OFFSET(16)]
	str	x18,      [x2, #CPU_XREG_OFFSET(18)]

	ldp	x6, x7, [sp], #16	// x2, x3
	ldp	x4, x5, [sp], #16	// x0, x1

	stp	x4, x5, [x2, #CPU_XREG_OFFSET(0)]
	stp	x6, x7, [x2, #CPU_XREG_OFFSET(2)]

	save_callee_saved_regs x2

	// Restore vcpu & host_ctxt from the stack
	// (preserving return code in x1)
	ldp	x0, x2, [sp], #16
	// Now restore the host regs
	restore_callee_saved_regs x2

	mov	x0, x1
	ret
ENDPROC(__guest_exit)

ENTRY(__fpsimd_guest_restore)
	stp	x4, lr, [sp, #-16]!

	// Stop trapping FP/SIMD accesses: non-VHE clears CPTR_EL2.TFP,
	// while VHE sets CPACR_EL1.FPEN instead.
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
alternative_else
	mrs	x2, cpacr_el1
	orr	x2, x2, #CPACR_EL1_FPEN
	msr	cpacr_el1, x2
alternative_endif
	isb

	mrs	x3, tpidr_el2		// x3: vcpu

	// Save the host's FP/SIMD state...
	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
	kern_hyp_va x0
	add	x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_save_state

	// ...and restore the guest's.
	add	x2, x3, #VCPU_CONTEXT
	add	x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_restore_state

	// Skip restoring fpexc32 for AArch64 guests
	mrs	x1, hcr_el2
	tbnz	x1, #HCR_RW_SHIFT, 1f
	ldr	x4, [x3, #VCPU_FPEXC32_EL2]
	msr	fpexc32_el2, x4
1:
	ldp	x4, lr, [sp], #16
	ldp	x2, x3, [sp], #16	// pushed by the EL2 trap vector
	ldp	x0, x1, [sp], #16	// pushed by the EL2 trap vector

	eret
ENDPROC(__fpsimd_guest_restore)

/*
 * When using the extended idmap, we don't have a trampoline page we can use
 * while we switch page tables during __kvm_hyp_reset. Accessing the idmap
 * directly would be ideal, but if we're using the extended idmap then the
 * idmap is located above HYP_PAGE_OFFSET, and the address will be masked by
 * kvm_call_hyp using kern_hyp_va.
 *
 * x0: HYP boot pgd
 * x1: HYP phys_idmap_start
 */
ENTRY(__extended_idmap_trampoline)
	mov	x4, x1
	adr_l	x3, __kvm_hyp_reset

	/* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
	bfi	x4, x3, #0, #PAGE_SHIFT
	br	x4
ENDPROC(__extended_idmap_trampoline)
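
To make the address arithmetic explicit: the bfi above keeps the
page-aligned bits of phys_idmap_start and splices in the low PAGE_SHIFT
bits of __kvm_hyp_reset's link-time address, which relies on the hyp
idmap text fitting within a single page. A C rendering of the same
computation (an illustrative sketch only; kvm_reset_target is a
hypothetical helper, PAGE_SIZE as in the kernel headers):

/* Equivalent of: bfi x4, x3, #0, #PAGE_SHIFT ; br x4 */
static unsigned long kvm_reset_target(unsigned long phys_idmap_start,
				      unsigned long kvm_hyp_reset_addr)
{
	unsigned long page_off = kvm_hyp_reset_addr & (PAGE_SIZE - 1);

	/* keep the idmap page's base, replace the offset within it */
	return (phys_idmap_start & ~(PAGE_SIZE - 1)) | page_off;
}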