Merge branch 'kvm-arm64/host-hvc-table' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 8c38602fb3
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -189,8 +189,6 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
-extern void __kvm_enable_ssbs(void);
-
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -72,6 +72,28 @@ alternative_cb kvm_update_va_mask
 alternative_cb_end
 .endm
 
+/*
+ * Convert a kernel image address to a PA
+ * reg: kernel address to be converted in place
+ * tmp: temporary register
+ *
+ * The actual code generation takes place in kvm_get_kimage_voffset, and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_get_kimage_voffset uses the
+ * specific registers encoded in the instructions).
+ */
+.macro kimg_pa reg, tmp
+alternative_cb kvm_get_kimage_voffset
+	movz	\tmp, #0
+	movk	\tmp, #0, lsl #16
+	movk	\tmp, #0, lsl #32
+	movk	\tmp, #0, lsl #48
+alternative_cb_end
+
+	/* reg = __pa(reg) */
+	sub	\reg, \reg, \tmp
+.endm
+
 #else
 
 #include <linux/pgtable.h>
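The kimg_pa macro only reserves four instruction slots: at boot, kvm_get_kimage_voffset (see the va_layout.c hunk below) rewrites them into a movz/movk sequence that loads kimage_voffset into \tmp. A minimal C model of the resulting computation, using hypothetical address values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values, for illustration only. */
	uint64_t kimage_voffset = 0xffff800010000000ULL - 0x40000000ULL;
	uint64_t kimg_va = 0xffff800010e50000ULL;	/* a kernel image VA */

	/* What "sub \reg, \reg, \tmp" computes once \tmp is patched: */
	uint64_t pa = kimg_va - kimage_voffset;

	printf("PA = 0x%llx\n", (unsigned long long)pa);
	return 0;
}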
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -98,6 +120,24 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
+{
+	unsigned long offset;
+
+	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+				    "movk %0, #0, lsl #16\n"
+				    "movk %0, #0, lsl #32\n"
+				    "movk %0, #0, lsl #48\n",
+				    kvm_update_kimg_phys_offset)
+		     : "=r" (offset));
+
+	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
+}
+
+#define kimg_fn_hyp_va(v)	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
+
+#define kimg_fn_ptr(x)	(typeof(x) **)(x)
+
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
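Taken together: __kimg_hyp_va() subtracts the boot-patched offset (kimage_voffset + PHYS_OFFSET, see kvm_update_kimg_phys_offset below), which turns a kernel-image address into its offset in the linear map; ORing in PAGE_OFFSET yields the linear-map alias, and __kern_hyp_va() then converts that to a HYP VA. A sketch with hypothetical constants, modelling only the simple masking flavour of kern_hyp_va:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All constants are hypothetical, for illustration only. */
	uint64_t page_offset    = 0xffff800000000000ULL;
	uint64_t phys_offset    = 0x40000000ULL;
	uint64_t kimage_voffset = 0xffff800010000000ULL - phys_offset;
	uint64_t hyp_va_mask    = 0x0000ffffffffffffULL;	/* stand-in for the patched mask */

	uint64_t kimg_va = 0xffff800010e50000ULL;		/* function address in the kernel image */
	uint64_t offset  = kimage_voffset + phys_offset;	/* value patched by kvm_update_kimg_phys_offset */
	uint64_t lin_va  = (kimg_va - offset) | page_offset;	/* linear-map alias */
	uint64_t hyp_va  = lin_va & hyp_va_mask;		/* simplified __kern_hyp_va() */

	printf("hyp VA = 0x%llx\n", (unsigned long long)hyp_va);
	return 0;
}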
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -465,6 +465,7 @@
 
 #define SYS_PMCCFILTR_EL0		sys_reg(3, 3, 14, 15, 7)
 
+#define SYS_SCTLR_EL2			sys_reg(3, 4, 1, 0, 0)
 #define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)
 #define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
 #define SYS_SPSR_EL2			sys_reg(3, 4, 4, 0, 0)
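For orientation, sys_reg() packs the op0/op1/CRn/CRm/op2 values into the immediate layout used by MRS/MSR. A sketch using the shift values I believe arm64's sysreg.h uses (verify against the tree):

#include <stdio.h>

#define Op0_shift	19
#define Op1_shift	16
#define CRn_shift	12
#define CRm_shift	8
#define Op2_shift	5

#define sys_reg(op0, op1, crn, crm, op2)		\
	(((op0) << Op0_shift) | ((op1) << Op1_shift) |	\
	 ((crn) << CRn_shift) | ((crm) << CRm_shift) |	\
	 ((op2) << Op2_shift))

int main(void)
{
	/* SCTLR_EL2: op0=3, op1=4, CRn=1, CRm=0, op2=0 */
	printf("SYS_SCTLR_EL2 = 0x%x\n", sys_reg(3, 4, 1, 0, 0));
	return 0;
}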
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -64,13 +64,12 @@ __efistub__ctype = _ctype;
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
+KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
+KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
 
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
-/* Kernel constant needed to compute idmap addresses. */
-KVM_NVHE_ALIAS(kimage_voffset);
-
 /* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */
 KVM_NVHE_ALIAS(__hyp_panic_string);
 KVM_NVHE_ALIAS(panic);
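For reference, KVM_NVHE_ALIAS makes a kernel symbol reachable from the nVHE object, whose own symbols are namespaced with a __kvm_nvhe_ prefix. Roughly, simplified from hyp_image.h (a sketch, not the exact definition):

/* kvm_nvhe_sym() applies the nVHE symbol prefix... */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
/*
 * ...and KVM_NVHE_ALIAS() emits a linker assignment such as
 * "__kvm_nvhe_panic = panic;", so hyp references to the prefixed
 * name resolve to the kernel symbol's address.
 */
#define KVM_NVHE_ALIAS(sym)	kvm_nvhe_sym(sym) = sym;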
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -13,8 +13,6 @@
 	.text
 
 SYM_FUNC_START(__host_exit)
-	stp	x0, x1, [sp, #-16]!
-
 	get_host_ctxt	x0, x1
 
 	/* Store the host regs x2 and x3 */
@@ -99,13 +97,15 @@ SYM_FUNC_END(__hyp_do_panic)
 	mrs	x0, esr_el2
 	lsr	x0, x0, #ESR_ELx_EC_SHIFT
 	cmp	x0, #ESR_ELx_EC_HVC64
-	ldp	x0, x1, [sp], #16
 	b.ne	__host_exit
 
+	ldp	x0, x1, [sp]		// Don't fixup the stack yet
+
 	/* Check for a stub HVC call */
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.hs	__host_exit
 
+	add	sp, sp, #16
 	/*
 	 * Compute the idmap address of __kvm_handle_stub_hvc and
 	 * jump there. Since we use kimage_voffset, do not use the
@@ -115,10 +115,7 @@ SYM_FUNC_END(__hyp_do_panic)
 	 * Preserve x0-x4, which may contain stub parameters.
 	 */
 	ldr	x5, =__kvm_handle_stub_hvc
-	ldr_l	x6, kimage_voffset
-
-	/* x5 = __pa(x5) */
-	sub	x5, x5, x6
+	kimg_pa	x5, x6
 	br	x5
 .L__vect_end\@:
 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
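The net effect of the two host.S hunks: the sync-exception vector now keeps x0/x1 on the stack while deciding where to branch, and only fixes the stack up once the destination is known. A C-level sketch of the triage, with a hypothetical HVC_STUB_HCALL_NR (the real logic is the assembly above):

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_HVC64	0x16
#define HVC_STUB_HCALL_NR	4	/* hypothetical */

enum target { TO_HOST_EXIT, TO_STUB };

static enum target triage(uint64_t esr, uint64_t x0)
{
	if ((esr >> ESR_ELx_EC_SHIFT) != ESR_ELx_EC_HVC64)
		return TO_HOST_EXIT;	/* not an HVC: save the full context */
	if (x0 >= HVC_STUB_HCALL_NR)
		return TO_HOST_EXIT;	/* ordinary host hypercall */
	return TO_STUB;			/* early stub HVC: jump via the idmap */
}

int main(void)
{
	uint64_t esr = (uint64_t)ESR_ELx_EC_HVC64 << ESR_ELx_EC_SHIFT;

	printf("%d\n", triage(esr, 1));	/* prints 1 (TO_STUB) */
	return 0;
}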
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -12,106 +12,150 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
-#include <kvm/arm_hypercalls.h>
+#define cpu_reg(ctxt, r)	(ctxt)->regs.regs[r]
+#define DECLARE_REG(type, name, ctxt, reg)	\
+				type name = (type)cpu_reg(ctxt, (reg))
 
-static void handle_host_hcall(unsigned long func_id,
-			      struct kvm_cpu_context *host_ctxt)
+static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
-	unsigned long ret = 0;
+	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
 
-	switch (func_id) {
-	case KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run): {
-		unsigned long r1 = host_ctxt->regs.regs[1];
-		struct kvm_vcpu *vcpu = (struct kvm_vcpu *)r1;
+	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+}
 
-		ret = __kvm_vcpu_run(kern_hyp_va(vcpu));
-		break;
-	}
-	case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context):
-		__kvm_flush_vm_context();
-		break;
-	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_ipa): {
-		unsigned long r1 = host_ctxt->regs.regs[1];
-		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
-		phys_addr_t ipa = host_ctxt->regs.regs[2];
-		int level = host_ctxt->regs.regs[3];
+static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
+{
+	__kvm_flush_vm_context();
+}
 
-		__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
-		break;
-	}
-	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
-		unsigned long r1 = host_ctxt->regs.regs[1];
-		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
+	DECLARE_REG(int, level, host_ctxt, 3);
 
-		__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
-		break;
-	}
-	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
-		unsigned long r1 = host_ctxt->regs.regs[1];
-		struct kvm_s2_mmu *mmu = (struct kvm_s2_mmu *)r1;
+	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
+}
 
-		__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
-		break;
-	}
-	case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
-		u64 cntvoff = host_ctxt->regs.regs[1];
+static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
 
-		__kvm_timer_set_cntvoff(cntvoff);
-		break;
-	}
-	case KVM_HOST_SMCCC_FUNC(__kvm_enable_ssbs):
-		__kvm_enable_ssbs();
-		break;
-	case KVM_HOST_SMCCC_FUNC(__vgic_v3_get_ich_vtr_el2):
-		ret = __vgic_v3_get_ich_vtr_el2();
-		break;
-	case KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr):
-		ret = __vgic_v3_read_vmcr();
-		break;
-	case KVM_HOST_SMCCC_FUNC(__vgic_v3_write_vmcr): {
-		u32 vmcr = host_ctxt->regs.regs[1];
+	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
+}
 
-		__vgic_v3_write_vmcr(vmcr);
-		break;
-	}
-	case KVM_HOST_SMCCC_FUNC(__vgic_v3_init_lrs):
-		__vgic_v3_init_lrs();
-		break;
-	case KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2):
-		ret = __kvm_get_mdcr_el2();
-		break;
-	case KVM_HOST_SMCCC_FUNC(__vgic_v3_save_aprs): {
-		unsigned long r1 = host_ctxt->regs.regs[1];
-		struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
+static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
 
-		__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
-		break;
-	}
-	case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): {
-		unsigned long r1 = host_ctxt->regs.regs[1];
-		struct vgic_v3_cpu_if *cpu_if = (struct vgic_v3_cpu_if *)r1;
+	__kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+}
 
-		__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
-		break;
-	}
-	default:
-		/* Invalid host HVC. */
-		host_ctxt->regs.regs[0] = SMCCC_RET_NOT_SUPPORTED;
-		return;
-	}
+static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
+{
+	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
+}
 
-	host_ctxt->regs.regs[0] = SMCCC_RET_SUCCESS;
-	host_ctxt->regs.regs[1] = ret;
+static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
+{
+	u64 tmp;
+
+	tmp = read_sysreg_el2(SYS_SCTLR);
+	tmp |= SCTLR_ELx_DSSBS;
+	write_sysreg_el2(tmp, SYS_SCTLR);
+}
+
+static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
+{
+	cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
+}
+
+static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
+{
+	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
+}
+
+static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
+{
+	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
+}
+
+static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
+{
+	__vgic_v3_init_lrs();
+}
+
+static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
+{
+	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
+}
+
+static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
+
+	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
+}
+
+static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
+
+	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
+}
+
+typedef void (*hcall_t)(struct kvm_cpu_context *);
+
+#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x)
+
+static const hcall_t *host_hcall[] = {
+	HANDLE_FUNC(__kvm_vcpu_run),
+	HANDLE_FUNC(__kvm_flush_vm_context),
+	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
+	HANDLE_FUNC(__kvm_tlb_flush_vmid),
+	HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
+	HANDLE_FUNC(__kvm_timer_set_cntvoff),
+	HANDLE_FUNC(__kvm_enable_ssbs),
+	HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
+	HANDLE_FUNC(__vgic_v3_read_vmcr),
+	HANDLE_FUNC(__vgic_v3_write_vmcr),
+	HANDLE_FUNC(__vgic_v3_init_lrs),
+	HANDLE_FUNC(__kvm_get_mdcr_el2),
+	HANDLE_FUNC(__vgic_v3_save_aprs),
+	HANDLE_FUNC(__vgic_v3_restore_aprs),
+};
+
+static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(unsigned long, id, host_ctxt, 0);
+	const hcall_t *kfn;
+	hcall_t hfn;
+
+	id -= KVM_HOST_SMCCC_ID(0);
+
+	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
+		goto inval;
+
+	kfn = host_hcall[id];
+	if (unlikely(!kfn))
+		goto inval;
+
+	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
+
+	hfn = kimg_fn_hyp_va(kfn);
+	hfn(host_ctxt);
+
+	return;
+inval:
+	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
 }
 
 void handle_trap(struct kvm_cpu_context *host_ctxt)
 {
 	u64 esr = read_sysreg_el2(SYS_ESR);
-	unsigned long func_id;
 
-	if (ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64)
+	if (unlikely(ESR_ELx_EC(esr) != ESR_ELx_EC_HVC64))
 		hyp_panic();
 
-	func_id = host_ctxt->regs.regs[0];
-	handle_host_hcall(func_id, host_ctxt);
+	handle_host_hcall(host_ctxt);
 }
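The rewrite above replaces a monolithic switch with a table of handlers indexed by SMCCC function ID, bounds-checked and NULL-checked before the indirect call (the kimg_fn_ptr/kimg_fn_hyp_va pair additionally converts the stored kernel-image pointer to a HYP VA). A standalone demo of the same dispatch pattern, with illustrative names and IDs:

#include <stddef.h>
#include <stdio.h>

struct ctx { unsigned long regs[4]; };

typedef void (*hcall_t)(struct ctx *);

static void handle_ping(struct ctx *c) { c->regs[1] = 42; }

static const hcall_t table[] = {
	[0] = handle_ping,	/* gaps in the ID space stay NULL */
};

static void dispatch(struct ctx *c)
{
	unsigned long id = c->regs[0];

	if (id >= sizeof(table) / sizeof(table[0]) || !table[id]) {
		c->regs[0] = (unsigned long)-1;	/* "not supported" */
		return;
	}
	c->regs[0] = 0;				/* "success" */
	table[id](c);
}

int main(void)
{
	struct ctx c = { .regs = { 0 } };

	dispatch(&c);
	printf("ret=%ld r1=%lu\n", (long)c.regs[0], c.regs[1]);
	return 0;
}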
--- a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
@@ -33,14 +33,3 @@ void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
 	__sysreg_restore_user_state(ctxt);
 	__sysreg_restore_el2_return_state(ctxt);
 }
-
-void __kvm_enable_ssbs(void)
-{
-	u64 tmp;
-
-	asm volatile(
-	"mrs	%0, sctlr_el2\n"
-	"orr	%0, %0, %1\n"
-	"msr	sctlr_el2, %0"
-	: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
-}
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -11,6 +11,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
+#include <asm/kvm_mmu.h>
 #include <asm/memory.h>
 
 /*
  * The LSB of the HYP VA tag
@@ -201,3 +202,58 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 					   AARCH64_INSN_BRANCH_NOLINK);
 	*updptr++ = cpu_to_le32(insn);
 }
+
+static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u32 insn, oinsn, rd;
+
+	BUG_ON(nr_inst != 4);
+
+	/* Compute target register */
+	oinsn = le32_to_cpu(*origptr);
+	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+	/* movz rd, #(val & 0xffff) */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)val,
+					 0,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_ZERO);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 16),
+					 16,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 32),
+					 32,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 48),
+					 48,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+}
+
+void kvm_update_kimg_phys_offset(struct alt_instr *alt,
+				 __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
+}
+
+void kvm_get_kimage_voffset(struct alt_instr *alt,
+			    __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
+}
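generate_mov_q() materializes a 64-bit constant 16 bits at a time: one movz for bits 15:0, then three movk instructions for the higher half-words. A demo that just computes the four immediates (the kernel emits real instructions through aarch64_insn_gen_movewide()):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0xffff800010e50000ULL;	/* example constant */

	for (int shift = 0; shift < 64; shift += 16) {
		uint16_t imm = (uint16_t)(val >> shift);

		printf("%s\tx0, #0x%04x, lsl #%d\n",
		       shift ? "movk" : "movz", imm, shift);
	}
	return 0;
}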