From e2ffceaae50883c5064641167078e5720fd8b74a Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:54 +0000 Subject: [PATCH 01/39] KVM: arm64: Correctly treat writes to OSLSR_EL1 as undefined Writes to OSLSR_EL1 are UNDEFINED and should never trap from EL1 to EL2, but the kvm trap handler for OSLSR_EL1 handles writes via ignore_write(). This is confusing to readers of code, but should have no functional impact. For clarity, use write_to_read_only() rather than ignore_write(). If a trap is unexpectedly taken to EL2 in violation of the architecture, this will WARN_ONCE() and inject an undef into the guest. Reviewed-by: Reiji Watanabe Reviewed-by: Mark Rutland [adopted Mark's changelog suggestion, thanks!] Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-2-oupton@google.com --- arch/arm64/kvm/sys_regs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4dc2fba316ff..85208acd273d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -292,7 +292,7 @@ static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { if (p->is_write) { - return ignore_write(vcpu, p); + return write_to_read_only(vcpu, p, r); } else { p->regval = (1 << 3); return true; From d42e26716d038d9689a23c193b934cdf0e2a2117 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:55 +0000 Subject: [PATCH 02/39] KVM: arm64: Stash OSLSR_EL1 in the cpu context An upcoming change to KVM will emulate the OS Lock from the PoV of the guest. Add OSLSR_EL1 to the cpu context and handle reads using the stored value. Define some mnemonics for for handling the OSLM field and use them to make the reset value of OSLSR_EL1 more readable. Wire up a custom handler for writes from userspace and prevent any of the invariant bits from changing. Note that the OSLK bit is not invariant and will be made writable by the aforementioned change. 
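As a rough illustration of how the new OSLM mnemonics compose, a hypothetical decode helper could look like the sketch below (only the SYS_OSLSR_* defines come from this patch; the helper name and wrapper are illustrative, not part of the series):

	/* OSLM is a split field, bits [3] and [0] of OSLSR_EL1. */
	static inline bool oslsr_oslm_implemented(u64 oslsr)
	{
		/* The reset value advertises OSLM = 0b10, i.e. OS Lock implemented. */
		return (oslsr & SYS_OSLSR_OSLM_MASK) == SYS_OSLSR_OSLM_IMPLEMENTED;
	}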
Reviewed-by: Reiji Watanabe Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-3-oupton@google.com --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/include/asm/sysreg.h | 5 +++++ arch/arm64/kvm/sys_regs.c | 31 ++++++++++++++++++++++++------- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5bc01e62c08a..cc1cc40d89f0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -171,6 +171,7 @@ enum vcpu_sysreg { PAR_EL1, /* Physical Address Register */ MDSCR_EL1, /* Monitor Debug System Control Register */ MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */ + OSLSR_EL1, /* OS Lock Status Register */ DISR_EL1, /* Deferred Interrupt Status Register */ /* Performance Monitors Registers */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 898bee0004ae..abc85eaa453d 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -129,7 +129,12 @@ #define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7) #define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0) #define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4) + #define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4) +#define SYS_OSLSR_OSLM_MASK (BIT(3) | BIT(0)) +#define SYS_OSLSR_OSLM_NI 0 +#define SYS_OSLSR_OSLM_IMPLEMENTED BIT(3) + #define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4) #define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4) #define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 85208acd273d..b8286c31e01c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -291,12 +291,28 @@ static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - if (p->is_write) { + if (p->is_write) return write_to_read_only(vcpu, p, r); - } else { - p->regval = (1 << 3); - return true; - } + + p->regval = __vcpu_sys_reg(vcpu, r->reg); + return true; +} + +static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + const struct kvm_one_reg *reg, void __user *uaddr) +{ + u64 id = sys_reg_to_index(rd); + u64 val; + int err; + + err = reg_from_user(&val, uaddr, id); + if (err) + return err; + + if (val != rd->val) + return -EINVAL; + + return 0; } static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, @@ -1448,7 +1464,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi }, - { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 }, + { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1, + SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, }, { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi }, { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi }, @@ -1923,7 +1940,7 @@ static const struct sys_reg_desc cp14_regs[] = { { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, DBGBXVR(1), /* DBGOSLSR */ - { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, + { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 }, DBGBXVR(2), DBGBXVR(3), /* DBGOSDLR */ From f24adc65c5568a8d94e2693f5441de80f1ffe0d3 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:56 +0000 Subject: [PATCH 03/39] KVM: arm64: Allow guest to set the OSLK bit Allow writes to OSLAR and forward the OSLK bit to OSLSR. Do nothing with the value for now. 
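From the guest's point of view the emulation is exercised with the architected sequence below; this minimal sketch mirrors the enable_os_lock() helper the debug-exceptions selftest adds later in this series (SYS_OSLSR_OSLK is bit 1, defined by this patch):

	write_sysreg(1, oslar_el1);	/* OSLAR_EL1.OSLK = 1: request the OS Lock */
	isb();
	GUEST_ASSERT(read_sysreg(oslsr_el1) & SYS_OSLSR_OSLK);	/* OSLK forwarded to OSLSR_EL1 */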
Reviewed-by: Reiji Watanabe Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-4-oupton@google.com --- arch/arm64/include/asm/sysreg.h | 3 +++ arch/arm64/kvm/sys_regs.c | 37 ++++++++++++++++++++++++++------- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index abc85eaa453d..906a3550fc50 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -128,12 +128,15 @@ #define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6) #define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7) #define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0) + #define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4) +#define SYS_OSLAR_OSLK BIT(0) #define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4) #define SYS_OSLSR_OSLM_MASK (BIT(3) | BIT(0)) #define SYS_OSLSR_OSLM_NI 0 #define SYS_OSLSR_OSLM_IMPLEMENTED BIT(3) +#define SYS_OSLSR_OSLK BIT(1) #define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4) #define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b8286c31e01c..b0d7240ef49f 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -44,6 +44,10 @@ * 64bit interface. */ +static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); +static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); +static u64 sys_reg_to_index(const struct sys_reg_desc *reg); + static bool read_from_write_only(struct kvm_vcpu *vcpu, struct sys_reg_params *params, const struct sys_reg_desc *r) @@ -287,6 +291,24 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, return trap_raz_wi(vcpu, p, r); } +static bool trap_oslar_el1(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 oslsr; + + if (!p->is_write) + return read_from_write_only(vcpu, p, r); + + /* Forward the OSLK bit to OSLSR */ + oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK; + if (p->regval & SYS_OSLAR_OSLK) + oslsr |= SYS_OSLSR_OSLK; + + __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr; + return true; +} + static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -309,9 +331,14 @@ static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, if (err) return err; - if (val != rd->val) + /* + * The only modifiable bit is the OSLK bit. Refuse the write if + * userspace attempts to change any other bit in the register. 
+ */ + if ((val ^ rd->val) & ~SYS_OSLSR_OSLK) return -EINVAL; + __vcpu_sys_reg(vcpu, rd->reg) = val; return 0; } @@ -1180,10 +1207,6 @@ static bool access_raz_id_reg(struct kvm_vcpu *vcpu, return __access_id_reg(vcpu, p, r, true); } -static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); -static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); -static u64 sys_reg_to_index(const struct sys_reg_desc *reg); - /* Visibility overrides for SVE-specific control registers */ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) @@ -1463,7 +1486,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { DBG_BCR_BVR_WCR_WVR_EL1(15), { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, - { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi }, + { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 }, { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1, SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, }, { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, @@ -1937,7 +1960,7 @@ static const struct sys_reg_desc cp14_regs[] = { DBGBXVR(0), /* DBGOSLAR */ - { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, + { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 }, DBGBXVR(1), /* DBGOSLSR */ { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 }, From 7dabf02f43a1670d13282463fc0106f01dfd6f9c Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:57 +0000 Subject: [PATCH 04/39] KVM: arm64: Emulate the OS Lock The OS lock blocks all debug exceptions at every EL. To date, KVM has not implemented the OS lock for its guests, despite the fact that it is mandatory per the architecture. Simple context switching between the guest and host is not appropriate, as its effects are not constrained to the guest context. Emulate the OS Lock by clearing MDE and SS in MDSCR_EL1, thereby blocking all but software breakpoint instructions. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-5-oupton@google.com --- arch/arm64/include/asm/kvm_host.h | 4 ++++ arch/arm64/kvm/debug.c | 26 ++++++++++++++++++++++---- arch/arm64/kvm/sys_regs.c | 6 +++--- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cc1cc40d89f0..3c73e4de4229 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -726,6 +726,10 @@ void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); + +#define kvm_vcpu_os_lock_enabled(vcpu) \ + (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK)) + int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index db9361338b2a..4fd5c216c4bb 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -105,9 +105,11 @@ static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu) * - Userspace is using the hardware to debug the guest * (KVM_GUESTDBG_USE_HW is set). * - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear). + * - The guest has enabled the OS Lock (debug exceptions are blocked). 
*/ if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) || - !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) + !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY) || + kvm_vcpu_os_lock_enabled(vcpu)) vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA; trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2); @@ -160,8 +162,8 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) kvm_arm_setup_mdcr_el2(vcpu); - /* Is Guest debugging in effect? */ - if (vcpu->guest_debug) { + /* Check if we need to use the debug registers. */ + if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) { /* Save guest debug state */ save_guest_debug_regs(vcpu); @@ -223,6 +225,19 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) trace_kvm_arm_set_regset("WAPTS", get_num_wrps(), &vcpu->arch.debug_ptr->dbg_wcr[0], &vcpu->arch.debug_ptr->dbg_wvr[0]); + + /* + * The OS Lock blocks debug exceptions in all ELs when it is + * enabled. If the guest has enabled the OS Lock, constrain its + * effects to the guest. Emulate the behavior by clearing + * MDSCR_EL1.MDE. In so doing, we ensure that host debug + * exceptions are unaffected by guest configuration of the OS + * Lock. + */ + } else if (kvm_vcpu_os_lock_enabled(vcpu)) { + mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1); + mdscr &= ~DBG_MDSCR_MDE; + vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1); } } @@ -244,7 +259,10 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) { trace_kvm_arm_clear_debug(vcpu->guest_debug); - if (vcpu->guest_debug) { + /* + * Restore the guest's debug registers if we were using them. + */ + if (vcpu->guest_debug || kvm_vcpu_os_lock_enabled(vcpu)) { restore_guest_debug_regs(vcpu); /* diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b0d7240ef49f..dd34b5ab51d4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1457,9 +1457,9 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, * Debug handling: We do trap most, if not all debug related system * registers. The implementation is good enough to ensure that a guest * can use these with minimal performance degradation. The drawback is - * that we don't implement any of the external debug, none of the - * OSlock protocol. This should be revisited if we ever encounter a - * more demanding guest... + * that we don't implement any of the external debug architecture. + * This should be revisited if we ever encounter a more demanding + * guest... */ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_DC_ISW), access_dcsw }, From d134998838ac217a8427c1ddc83cf48888bb3fa3 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:58 +0000 Subject: [PATCH 05/39] selftests: KVM: Add OSLSR_EL1 to the list of blessed regs OSLSR_EL1 is now part of the visible system register state. Add it to the get-reg-list selftest to ensure we keep it that way. 
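Being part of the visible register state also means userspace can read and migrate the register; a minimal sketch, assuming an open vCPU fd and the selftests' ARM64_SYS_REG() encoding helper:

	__u64 val;
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(2, 0, 1, 1, 4),	/* OSLSR_EL1 */
		.addr = (__u64)&val,
	};
	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* returns the emulated OSLSR_EL1 value */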
Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-6-oupton@google.com --- tools/testing/selftests/kvm/aarch64/get-reg-list.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c index f769fc6cd927..f12147c43464 100644 --- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c +++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c @@ -760,6 +760,7 @@ static __u64 base_regs[] = { ARM64_SYS_REG(2, 0, 0, 15, 5), ARM64_SYS_REG(2, 0, 0, 15, 6), ARM64_SYS_REG(2, 0, 0, 15, 7), + ARM64_SYS_REG(2, 0, 1, 1, 4), /* OSLSR_EL1 */ ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */ ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */ ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */ From 05c9324de1695b5e61dceca6d2ef0ab8c0f2f26b Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 3 Feb 2022 17:41:59 +0000 Subject: [PATCH 06/39] selftests: KVM: Test OS lock behavior KVM now correctly handles the OS Lock for its guests. When set, KVM blocks all debug exceptions originating from the guest. Add test cases to the debug-exceptions test to assert that software breakpoint, hardware breakpoint, watchpoint, and single-step exceptions are in fact blocked. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220203174159.2887882-7-oupton@google.com --- .../selftests/kvm/aarch64/debug-exceptions.c | 58 ++++++++++++++++++- 1 file changed, 56 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index ea189d83abf7..63b2178210c4 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -23,7 +23,7 @@ #define SPSR_D (1 << 9) #define SPSR_SS (1 << 21) -extern unsigned char sw_bp, hw_bp, bp_svc, bp_brk, hw_wp, ss_start; +extern unsigned char sw_bp, sw_bp2, hw_bp, hw_bp2, bp_svc, bp_brk, hw_wp, ss_start; static volatile uint64_t sw_bp_addr, hw_bp_addr; static volatile uint64_t wp_addr, wp_data_addr; static volatile uint64_t svc_addr; @@ -47,6 +47,14 @@ static void reset_debug_state(void) isb(); } +static void enable_os_lock(void) +{ + write_sysreg(1, oslar_el1); + isb(); + + GUEST_ASSERT(read_sysreg(oslsr_el1) & 2); +} + static void install_wp(uint64_t addr) { uint32_t wcr; @@ -99,6 +107,7 @@ static void guest_code(void) GUEST_SYNC(0); /* Software-breakpoint */ + reset_debug_state(); asm volatile("sw_bp: brk #0"); GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp)); @@ -152,6 +161,51 @@ static void guest_code(void) GUEST_ASSERT_EQ(ss_addr[1], PC(ss_start) + 4); GUEST_ASSERT_EQ(ss_addr[2], PC(ss_start) + 8); + GUEST_SYNC(6); + + /* OS Lock does not block software-breakpoint */ + reset_debug_state(); + enable_os_lock(); + sw_bp_addr = 0; + asm volatile("sw_bp2: brk #0"); + GUEST_ASSERT_EQ(sw_bp_addr, PC(sw_bp2)); + + GUEST_SYNC(7); + + /* OS Lock blocking hardware-breakpoint */ + reset_debug_state(); + enable_os_lock(); + install_hw_bp(PC(hw_bp2)); + hw_bp_addr = 0; + asm volatile("hw_bp2: nop"); + GUEST_ASSERT_EQ(hw_bp_addr, 0); + + GUEST_SYNC(8); + + /* OS Lock blocking watchpoint */ + reset_debug_state(); + enable_os_lock(); + write_data = '\0'; + wp_data_addr = 0; + install_wp(PC(write_data)); + write_data = 'x'; + GUEST_ASSERT_EQ(write_data, 'x'); + GUEST_ASSERT_EQ(wp_data_addr, 0); + + GUEST_SYNC(9); + + /* OS Lock blocking single-step */ + reset_debug_state(); + 
enable_os_lock(); + ss_addr[0] = 0; + install_ss(); + ss_idx = 0; + asm volatile("mrs x0, esr_el1\n\t" + "add x0, x0, #1\n\t" + "msr daifset, #8\n\t" + : : : "x0"); + GUEST_ASSERT_EQ(ss_addr[0], 0); + GUEST_DONE(); } @@ -223,7 +277,7 @@ int main(int argc, char *argv[]) vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_EC_SVC64, guest_svc_handler); - for (stage = 0; stage < 7; stage++) { + for (stage = 0; stage < 11; stage++) { vcpu_run(vm, VCPU_ID); switch (get_ucall(vm, VCPU_ID, &uc)) { From fcc5bf89635a05e627cdd2e9ec52c989c8dfe2ab Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 18 Jan 2022 01:57:01 +0000 Subject: [PATCH 07/39] KVM: arm64: Use read/write spin lock for MMU protection Replace MMU spinlock with rwlock and update all instances of the lock being acquired with a write lock acquisition. Future commit will add a fast path for permission relaxation during dirty logging under a read lock. Signed-off-by: Jing Zhang Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220118015703.3630552-2-jingzhangos@google.com --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/mmu.c | 36 +++++++++++++++---------------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5bc01e62c08a..4866842c31e1 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -50,6 +50,8 @@ #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) +#define KVM_HAVE_MMU_RWLOCK + /* * Mode of operation configurable with kvm-arm.mode early param. * See Documentation/admin-guide/kernel-parameters.txt for more information. diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index bc2aba953299..cafd5813c949 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -58,7 +58,7 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr, break; if (resched && next != end) - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); } while (addr = next, addr != end); return ret; @@ -179,7 +179,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); phys_addr_t end = start + size; - assert_spin_locked(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); WARN_ON(size & ~PAGE_MASK); WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap, may_block)); @@ -213,13 +213,13 @@ static void stage2_flush_vm(struct kvm *kvm) int idx, bkt; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, bkt, slots) stage2_flush_memslot(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); } @@ -720,13 +720,13 @@ void stage2_unmap_vm(struct kvm *kvm) idx = srcu_read_lock(&kvm->srcu); mmap_read_lock(current->mm); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, bkt, slots) stage2_unmap_memslot(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); mmap_read_unlock(current->mm); srcu_read_unlock(&kvm->srcu, idx); } @@ -736,14 +736,14 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); struct kvm_pgtable *pgt = NULL; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); pgt = mmu->pgt; if (pgt) { 
mmu->pgd_phys = 0; mmu->pgt = NULL; free_percpu(mmu->last_vcpu_ran); } - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); if (pgt) { kvm_pgtable_stage2_destroy(pgt); @@ -783,10 +783,10 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, if (ret) break; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot, &cache); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); if (ret) break; @@ -834,9 +834,9 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) start = memslot->base_gfn << PAGE_SHIFT; end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); stage2_wp_range(&kvm->arch.mmu, start, end); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); kvm_flush_remote_tlbs(kvm); } @@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (exec_fault && device) return -ENOEXEC; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); pgt = vcpu->arch.hw_mmu->pgt; if (mmu_notifier_retry(kvm, mmu_seq)) goto out_unlock; @@ -1271,7 +1271,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } out_unlock: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); return ret != -EAGAIN ? ret : 0; @@ -1286,10 +1286,10 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) trace_kvm_access_fault(fault_ipa); - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); mmu = vcpu->arch.hw_mmu; kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); pte = __pte(kpte); if (pte_valid(pte)) @@ -1692,9 +1692,9 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, gpa_t gpa = slot->base_gfn << PAGE_SHIFT; phys_addr_t size = slot->npages << PAGE_SHIFT; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); unmap_stage2_range(&kvm->arch.mmu, gpa, size); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } /* From f783ef1c0e82e4fc311a972472ff61f6d1d0e22d Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 18 Jan 2022 01:57:02 +0000 Subject: [PATCH 08/39] KVM: arm64: Add fast path to handle permission relaxation during dirty logging To reduce MMU lock contention during dirty logging, all permission relaxation operations would be performed under read lock. 
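The fast path boils down to the locking pattern below, a condensed sketch of the user_mem_abort() hunk in this patch (names taken from the diff):

	if (logging_perm_fault)
		read_lock(&kvm->mmu_lock);	/* only relaxing permissions: can run concurrently */
	else
		write_lock(&kvm->mmu_lock);

	/* ... stage-2 fault handling ... */

	if (logging_perm_fault)
		read_unlock(&kvm->mmu_lock);
	else
		write_unlock(&kvm->mmu_lock);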
Signed-off-by: Jing Zhang Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220118015703.3630552-3-jingzhangos@google.com --- arch/arm64/kvm/mmu.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index cafd5813c949..10df5d855d54 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1080,6 +1080,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, gfn_t gfn; kvm_pfn_t pfn; bool logging_active = memslot_is_logging(memslot); + bool logging_perm_fault = false; unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu); unsigned long vma_pagesize, fault_granule; enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; @@ -1114,6 +1115,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (logging_active) { force_pte = true; vma_shift = PAGE_SHIFT; + logging_perm_fault = (fault_status == FSC_PERM && write_fault); } else { vma_shift = get_vma_page_shift(vma, hva); } @@ -1212,7 +1214,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (exec_fault && device) return -ENOEXEC; - write_lock(&kvm->mmu_lock); + /* + * To reduce MMU contentions and enhance concurrency during dirty + * logging dirty logging, only acquire read lock for permission + * relaxation. + */ + if (logging_perm_fault) + read_lock(&kvm->mmu_lock); + else + write_lock(&kvm->mmu_lock); pgt = vcpu->arch.hw_mmu->pgt; if (mmu_notifier_retry(kvm, mmu_seq)) goto out_unlock; @@ -1271,7 +1281,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, } out_unlock: - write_unlock(&kvm->mmu_lock); + if (logging_perm_fault) + read_unlock(&kvm->mmu_lock); + else + write_unlock(&kvm->mmu_lock); kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); return ret != -EAGAIN ? ret : 0; From c340f7899af6f83bd937f8838949bb32da54c8a4 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 18 Jan 2022 01:57:03 +0000 Subject: [PATCH 09/39] KVM: selftests: Add vgic initialization for dirty log perf test for ARM For ARM64, if no vgic is setup before the dirty log perf test, the userspace irqchip would be used, which would affect the dirty log perf test result. 
Signed-off-by: Jing Zhang Tested-by: Fuad Tabba Reviewed-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220118015703.3630552-4-jingzhangos@google.com --- tools/testing/selftests/kvm/dirty_log_perf_test.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c index 1954b964d1cf..b501338d9430 100644 --- a/tools/testing/selftests/kvm/dirty_log_perf_test.c +++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c @@ -18,6 +18,12 @@ #include "test_util.h" #include "perf_test_util.h" #include "guest_modes.h" +#ifdef __aarch64__ +#include "aarch64/vgic.h" + +#define GICD_BASE_GPA 0x8000000ULL +#define GICR_BASE_GPA 0x80A0000ULL +#endif /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/ #define TEST_HOST_LOOP_N 2UL @@ -200,6 +206,10 @@ static void run_test(enum vm_guest_mode mode, void *arg) vm_enable_cap(vm, &cap); } +#ifdef __aarch64__ + vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA); +#endif + /* Start the iterations */ iteration = 0; host_quit = false; From 23afc82539cfcce105bf18c5c835c75e463ca349 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 24 Jan 2022 15:57:19 +0000 Subject: [PATCH 10/39] KVM: arm64: Add comments for context flush and sync callbacks Add a little bit of information on where _ctxflush_fp() and _ctxsync_fp() are called to help people unfamiliar with the code get up to speed. Signed-off-by: Mark Brown Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220124155720.3943374-2-broonie@kernel.org --- arch/arm64/kvm/fpsimd.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 2f48fd362a8c..397fdac75cb1 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -84,6 +84,11 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; } +/* + * Called just before entering the guest once we are no longer + * preemptable. Syncs the host's TIF_FOREIGN_FPSTATE with the KVM + * mirror of the flag used by the hypervisor. + */ void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu) { if (test_thread_flag(TIF_FOREIGN_FPSTATE)) @@ -93,10 +98,11 @@ void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu) } /* - * If the guest FPSIMD state was loaded, update the host's context - * tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu - * so that they will be written back if the kernel clobbers them due to - * kernel-mode NEON before re-entry into the guest. + * Called just after exiting the guest. If the guest FPSIMD state + * was loaded, update the host's context tracking data mark the CPU + * FPSIMD regs as dirty and belonging to vcpu so that they will be + * written back if the kernel clobbers them due to kernel-mode NEON + * before re-entry into the guest. */ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) { From 01a244decc760b1ae2caa045647d79ff431bf37b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 24 Jan 2022 15:57:20 +0000 Subject: [PATCH 11/39] KVM: arm64: Add some more comments in kvm_hyp_handle_fpsimd() The handling for FPSIMD/SVE traps is multi stage and involves some trap manipulation which isn't quite so immediately obvious as might be desired so add a few more comments. 
Signed-off-by: Mark Brown Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220124155720.3943374-3-broonie@kernel.org --- arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 701cfb964905..667654bd3734 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -173,6 +173,8 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) return false; /* Valid trap. Switch the context: */ + + /* First disable enough traps to allow us to update the registers */ if (has_vhe()) { reg = CPACR_EL1_FPEN; if (sve_guest) @@ -188,11 +190,13 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) } isb(); + /* Write out the host state if it's in the registers */ if (vcpu->arch.flags & KVM_ARM64_FP_HOST) { __fpsimd_save_state(vcpu->arch.host_fpsimd_state); vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; } + /* Restore the guest state */ if (sve_guest) __hyp_sve_restore_guest(vcpu); else From 432110cd83caa655c646ec5d8ca6a9e9afb9ccba Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 24 Jan 2022 16:11:15 +0000 Subject: [PATCH 12/39] arm64/fpsimd: Clarify the purpose of using last in fpsimd_save() When saving the floating point context in fpsimd_save() we always reference the state using last-> rather than using current->. Looking at the FP code in isolation the reason for this is not entirely obvious, it's done because when KVM is running it will bind the guest context and rely on the host writing out the guest state on context switch away from the guest. There's a slight trick here in that KVM still uses TIF_FOREIGN_FPSTATE and TIF_SVE to communicate what needs to be saved, it maintains those flags and restores them when it is done running the guest so that the normal restore paths function when we return back to userspace. Add a comment to explain this to help future readers work out what's going on a bit faster. Signed-off-by: Mark Brown Reviewed-by: Catalin Marinas Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220124161115.115200-1-broonie@kernel.org --- arch/arm64/kernel/fpsimd.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 5280e098cfb5..47af76e53221 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -348,7 +348,13 @@ static void task_fpsimd_load(void) /* * Ensure FPSIMD/SVE storage in memory for the loaded context is up to - * date with respect to the CPU registers. + * date with respect to the CPU registers. Note carefully that the + * current context is the context last bound to the CPU stored in + * last, if KVM is involved this may be the guest VM context rather + * than the host thread for the VM pointed to by current. This means + * that we must always reference the state storage via last rather + * than via current, other than the TIF_ flags which KVM will + * carefully maintain for us. */ static void fpsimd_save(void) { From 417838392f2e657ee25cc30e373ff4c35a0faa90 Mon Sep 17 00:00:00 2001 From: Shameer Kolothum Date: Mon, 22 Nov 2021 12:18:41 +0000 Subject: [PATCH 13/39] KVM: arm64: Introduce a new VMID allocator for KVM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A new VMID allocator for arm64 KVM use. This is based on arm64 ASID allocator algorithm. 
One major deviation from the ASID allocator is the way we flush the context. Unlike ASID allocator, we expect less frequent rollover in the case of VMIDs. Hence, instead of marking the CPU as flush_pending and issuing a local context invalidation on the next context switch, we  broadcast TLB flush + I-cache invalidation over the inner shareable domain on rollover. Signed-off-by: Shameer Kolothum Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211122121844.867-2-shameerali.kolothum.thodi@huawei.com --- arch/arm64/include/asm/kvm_host.h | 4 + arch/arm64/kvm/vmid.c | 177 ++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+) create mode 100644 arch/arm64/kvm/vmid.c diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5bc01e62c08a..a79b7c1b353f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -692,6 +692,10 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +int kvm_arm_vmid_alloc_init(void); +void kvm_arm_vmid_alloc_free(void); +void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); + static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) { vcpu_arch->steal.base = GPA_INVALID; diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c new file mode 100644 index 000000000000..aa01c97f7df0 --- /dev/null +++ b/arch/arm64/kvm/vmid.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * VMID allocator. + * + * Based on Arm64 ASID allocator algorithm. + * Please refer arch/arm64/mm/context.c for detailed + * comments on algorithm. + * + * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include + +#include +#include + +static unsigned int kvm_arm_vmid_bits; +static DEFINE_RAW_SPINLOCK(cpu_vmid_lock); + +static atomic64_t vmid_generation; +static unsigned long *vmid_map; + +static DEFINE_PER_CPU(atomic64_t, active_vmids); +static DEFINE_PER_CPU(u64, reserved_vmids); + +#define VMID_MASK (~GENMASK(kvm_arm_vmid_bits - 1, 0)) +#define VMID_FIRST_VERSION (1UL << kvm_arm_vmid_bits) + +#define NUM_USER_VMIDS VMID_FIRST_VERSION +#define vmid2idx(vmid) ((vmid) & ~VMID_MASK) +#define idx2vmid(idx) vmid2idx(idx) + +#define vmid_gen_match(vmid) \ + (!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits)) + +static void flush_context(void) +{ + int cpu; + u64 vmid; + + bitmap_clear(vmid_map, 0, NUM_USER_VMIDS); + + for_each_possible_cpu(cpu) { + vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0); + + /* Preserve reserved VMID */ + if (vmid == 0) + vmid = per_cpu(reserved_vmids, cpu); + __set_bit(vmid2idx(vmid), vmid_map); + per_cpu(reserved_vmids, cpu) = vmid; + } + + /* + * Unlike ASID allocator, we expect less frequent rollover in + * case of VMIDs. Hence, instead of marking the CPU as + * flush_pending and issuing a local context invalidation on + * the next context-switch, we broadcast TLB flush + I-cache + * invalidation over the inner shareable domain on rollover. + */ + kvm_call_hyp(__kvm_flush_vm_context); +} + +static bool check_update_reserved_vmid(u64 vmid, u64 newvmid) +{ + int cpu; + bool hit = false; + + /* + * Iterate over the set of reserved VMIDs looking for a match + * and update to use newvmid (i.e. the same VMID in the current + * generation). 
+ */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_vmids, cpu) == vmid) { + hit = true; + per_cpu(reserved_vmids, cpu) = newvmid; + } + } + + return hit; +} + +static u64 new_vmid(struct kvm_vmid *kvm_vmid) +{ + static u32 cur_idx = 1; + u64 vmid = atomic64_read(&kvm_vmid->id); + u64 generation = atomic64_read(&vmid_generation); + + if (vmid != 0) { + u64 newvmid = generation | (vmid & ~VMID_MASK); + + if (check_update_reserved_vmid(vmid, newvmid)) { + atomic64_set(&kvm_vmid->id, newvmid); + return newvmid; + } + + if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) { + atomic64_set(&kvm_vmid->id, newvmid); + return newvmid; + } + } + + vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx); + if (vmid != NUM_USER_VMIDS) + goto set_vmid; + + /* We're out of VMIDs, so increment the global generation count */ + generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION, + &vmid_generation); + flush_context(); + + /* We have more VMIDs than CPUs, so this will always succeed */ + vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1); + +set_vmid: + __set_bit(vmid, vmid_map); + cur_idx = vmid; + vmid = idx2vmid(vmid) | generation; + atomic64_set(&kvm_vmid->id, vmid); + return vmid; +} + +void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) +{ + unsigned long flags; + u64 vmid, old_active_vmid; + + vmid = atomic64_read(&kvm_vmid->id); + + /* + * Please refer comments in check_and_switch_context() in + * arch/arm64/mm/context.c. + */ + old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids)); + if (old_active_vmid && vmid_gen_match(vmid) && + atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids), + old_active_vmid, vmid)) + return; + + raw_spin_lock_irqsave(&cpu_vmid_lock, flags); + + /* Check that our VMID belongs to the current generation. */ + vmid = atomic64_read(&kvm_vmid->id); + if (!vmid_gen_match(vmid)) + vmid = new_vmid(kvm_vmid); + + atomic64_set(this_cpu_ptr(&active_vmids), vmid); + raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags); +} + +/* + * Initialize the VMID allocator + */ +int kvm_arm_vmid_alloc_init(void) +{ + kvm_arm_vmid_bits = kvm_get_vmid_bits(); + + /* + * Expect allocation after rollover to fail if we don't have + * at least one more VMID than CPUs. VMID #0 is always reserved. + */ + WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus()); + atomic64_set(&vmid_generation, VMID_FIRST_VERSION); + vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS), + sizeof(*vmid_map), GFP_KERNEL); + if (!vmid_map) + return -ENOMEM; + + return 0; +} + +void kvm_arm_vmid_alloc_free(void) +{ + kfree(vmid_map); +} From f8051e960922a9de8e42159103d5d9c697ef17ec Mon Sep 17 00:00:00 2001 From: Shameer Kolothum Date: Mon, 22 Nov 2021 12:18:42 +0000 Subject: [PATCH 14/39] KVM: arm64: Make VMID bits accessible outside of allocator Since we already set the kvm_arm_vmid_bits in the VMID allocator init function, make it accessible outside as well so that it can be used in the subsequent patch. 
Suggested-by: Will Deacon Signed-off-by: Shameer Kolothum Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211122121844.867-3-shameerali.kolothum.thodi@huawei.com --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kernel/image-vars.h | 3 +++ arch/arm64/kvm/vmid.c | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a79b7c1b353f..1fea13a0857e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -692,6 +692,7 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +extern unsigned int kvm_arm_vmid_bits; int kvm_arm_vmid_alloc_init(void); void kvm_arm_vmid_alloc_free(void); void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 7eaf1f7c4168..884844849c70 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -79,6 +79,9 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors); /* Kernel symbol used by icache_is_vpipt(). */ KVM_NVHE_ALIAS(__icache_flags); +/* VMID bits set by the KVM VMID allocator */ +KVM_NVHE_ALIAS(kvm_arm_vmid_bits); + /* Kernel symbols needed for cpus_have_final/const_caps checks. */ KVM_NVHE_ALIAS(arm64_const_caps_ready); KVM_NVHE_ALIAS(cpu_hwcap_keys); diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index aa01c97f7df0..9aff692b6b7d 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -16,7 +16,7 @@ #include #include -static unsigned int kvm_arm_vmid_bits; +unsigned int kvm_arm_vmid_bits; static DEFINE_RAW_SPINLOCK(cpu_vmid_lock); static atomic64_t vmid_generation; From 3248136b3637e1671e4fa46e32e2122f9ec4bc3d Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Mon, 22 Nov 2021 12:18:43 +0000 Subject: [PATCH 15/39] KVM: arm64: Align the VMID allocation with the arm64 ASID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At the moment, the VMID algorithm will send an SGI to all the CPUs to force an exit and then broadcast a full TLB flush and I-Cache invalidation. This patch uses the new VMID allocator. The benefits are:    - Aligns with arm64 ASID algorithm.    - CPUs are not forced to exit at roll-over. Instead, the VMID will be marked reserved and context invalidation is broadcasted. This will reduce the IPIs traffic.   - More flexible to add support for pinned KVM VMIDs in the future.     With the new algo, the code is now adapted:     - The call to update_vmid() will be done with preemption disabled as the new algo requires to store information per-CPU. Signed-off-by: Julien Grall Signed-off-by: Shameer Kolothum Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211122121844.867-4-shameerali.kolothum.thodi@huawei.com --- arch/arm64/include/asm/kvm_host.h | 4 +- arch/arm64/include/asm/kvm_mmu.h | 4 +- arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/arm.c | 105 ++++---------------------- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 3 +- arch/arm64/kvm/mmu.c | 1 - 6 files changed, 22 insertions(+), 97 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 1fea13a0857e..47aeed22e2ad 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -71,9 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu); void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); struct kvm_vmid { - /* The VMID generation used for the virt. 
memory system */ - u64 vmid_gen; - u32 vmid; + atomic64_t id; }; struct kvm_s2_mmu { diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 81839e9a8a24..74735a864eee 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -115,6 +115,7 @@ alternative_cb_end #include #include #include +#include void kvm_update_va_mask(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); @@ -266,7 +267,8 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu) u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0; baddr = mmu->pgd_phys; - vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT; + vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT; + vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits); return kvm_phys_to_vttbr(baddr) | vmid_field | cnp; } diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 91861fd8b897..261644b1a6bb 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -14,7 +14,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \ inject_fault.o va_layout.o handle_exit.o \ guest.o debug.o reset.o sys_regs.o \ vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \ - arch_timer.o trng.o\ + arch_timer.o trng.o vmid.o \ vgic/vgic.o vgic/vgic-init.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \ vgic/vgic-v3.o vgic/vgic-v4.o \ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ecc5958e27fe..be2fd84d526b 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -53,11 +53,6 @@ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); unsigned long kvm_arm_hyp_percpu_base[NR_CPUS]; DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); -/* The VMID used in the VTTBR */ -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); -static u32 kvm_next_vmid; -static DEFINE_SPINLOCK(kvm_vmid_lock); - static bool vgic_present; static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); @@ -489,87 +484,6 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) } #endif -/* Just ensure a guest exit from a particular CPU */ -static void exit_vm_noop(void *info) -{ -} - -void force_vm_exit(const cpumask_t *mask) -{ - preempt_disable(); - smp_call_function_many(mask, exit_vm_noop, NULL, true); - preempt_enable(); -} - -/** - * need_new_vmid_gen - check that the VMID is still valid - * @vmid: The VMID to check - * - * return true if there is a new generation of VMIDs being used - * - * The hardware supports a limited set of values with the value zero reserved - * for the host, so we check if an assigned value belongs to a previous - * generation, which requires us to assign a new value. If we're the first to - * use a VMID for the new generation, we must flush necessary caches and TLBs - * on all CPUs. - */ -static bool need_new_vmid_gen(struct kvm_vmid *vmid) -{ - u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen); - smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */ - return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen); -} - -/** - * update_vmid - Update the vmid with a valid VMID for the current generation - * @vmid: The stage-2 VMID information struct - */ -static void update_vmid(struct kvm_vmid *vmid) -{ - if (!need_new_vmid_gen(vmid)) - return; - - spin_lock(&kvm_vmid_lock); - - /* - * We need to re-check the vmid_gen here to ensure that if another vcpu - * already allocated a valid vmid for this vm, then this vcpu should - * use the same vmid. 
- */ - if (!need_new_vmid_gen(vmid)) { - spin_unlock(&kvm_vmid_lock); - return; - } - - /* First user of a new VMID generation? */ - if (unlikely(kvm_next_vmid == 0)) { - atomic64_inc(&kvm_vmid_gen); - kvm_next_vmid = 1; - - /* - * On SMP we know no other CPUs can use this CPU's or each - * other's VMID after force_vm_exit returns since the - * kvm_vmid_lock blocks them from reentry to the guest. - */ - force_vm_exit(cpu_all_mask); - /* - * Now broadcast TLB + ICACHE invalidation over the inner - * shareable domain to make sure all data structures are - * clean. - */ - kvm_call_hyp(__kvm_flush_vm_context); - } - - WRITE_ONCE(vmid->vmid, kvm_next_vmid); - kvm_next_vmid++; - kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1; - - smp_wmb(); - WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen)); - - spin_unlock(&kvm_vmid_lock); -} - static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) { return vcpu->arch.target >= 0; @@ -793,7 +707,6 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret) } return kvm_request_pending(vcpu) || - need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) || xfer_to_guest_mode_work_pending(); } @@ -855,8 +768,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (!ret) ret = 1; - update_vmid(&vcpu->arch.hw_mmu->vmid); - check_vcpu_requests(vcpu); /* @@ -866,6 +777,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) */ preempt_disable(); + /* + * The VMID allocator only tracks active VMIDs per + * physical CPU, and therefore the VMID allocated may not be + * preserved on VMID roll-over if the task was preempted, + * making a thread's VMID inactive. So we need to call + * kvm_arm_vmid_update() in non-premptible context. + */ + kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid); + kvm_pmu_flush_hwstate(vcpu); local_irq_disable(); @@ -2161,6 +2081,12 @@ int kvm_arch_init(void *opaque) if (err) return err; + err = kvm_arm_vmid_alloc_init(); + if (err) { + kvm_err("Failed to initialize VMID allocator.\n"); + return err; + } + if (!in_hyp_mode) { err = init_hyp_mode(); if (err) @@ -2200,6 +2126,7 @@ out_hyp: if (!in_hyp_mode) teardown_hyp_mode(); out_err: + kvm_arm_vmid_alloc_free(); return err; } diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 674f10564373..78edf077fa3b 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -138,8 +138,7 @@ int kvm_host_prepare_stage2(void *pgt_pool_base) mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); mmu->pgt = &host_kvm.pgt; - WRITE_ONCE(mmu->vmid.vmid_gen, 0); - WRITE_ONCE(mmu->vmid.vmid, 0); + atomic64_set(&mmu->vmid.id, 0); return 0; } diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index bc2aba953299..cde9166edbef 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -653,7 +653,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) mmu->pgt = pgt; mmu->pgd_phys = __pa(pgt->pgd); - WRITE_ONCE(mmu->vmid.vmid_gen, 0); return 0; out_destroy_pgtable: From 100b4f092f878dc379f1fcef9ce567c25dee3473 Mon Sep 17 00:00:00 2001 From: Shameer Kolothum Date: Mon, 22 Nov 2021 12:18:44 +0000 Subject: [PATCH 16/39] KVM: arm64: Make active_vmids invalid on vCPU schedule out MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like ASID allocator, we copy the active_vmids into the reserved_vmids on a rollover. But it's unlikely that every CPU will have a vCPU as current task and we may end up unnecessarily reserving the VMID space. 
Hence, set active_vmids to an invalid one when scheduling out a vCPU. Signed-off-by: Shameer Kolothum Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20211122121844.867-5-shameerali.kolothum.thodi@huawei.com --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 1 + arch/arm64/kvm/vmid.c | 25 ++++++++++++++++++++++--- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 47aeed22e2ad..305ea13c4fd0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -694,6 +694,7 @@ extern unsigned int kvm_arm_vmid_bits; int kvm_arm_vmid_alloc_init(void); void kvm_arm_vmid_alloc_free(void); void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); +void kvm_arm_vmid_clear_active(void); static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index be2fd84d526b..418014998f18 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -417,6 +417,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_timer_vcpu_put(vcpu); kvm_vgic_put(vcpu); kvm_vcpu_pmu_restore_host(vcpu); + kvm_arm_vmid_clear_active(); vcpu->cpu = -1; } diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 9aff692b6b7d..8d5f0506fd87 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -32,6 +32,13 @@ static DEFINE_PER_CPU(u64, reserved_vmids); #define vmid2idx(vmid) ((vmid) & ~VMID_MASK) #define idx2vmid(idx) vmid2idx(idx) +/* + * As vmid #0 is always reserved, we will never allocate one + * as below and can be treated as invalid. This is used to + * set the active_vmids on vCPU schedule out. + */ +#define VMID_ACTIVE_INVALID VMID_FIRST_VERSION + #define vmid_gen_match(vmid) \ (!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits)) @@ -122,6 +129,12 @@ set_vmid: return vmid; } +/* Called from vCPU sched out with preemption disabled */ +void kvm_arm_vmid_clear_active(void) +{ + atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID); +} + void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) { unsigned long flags; @@ -132,11 +145,17 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) /* * Please refer comments in check_and_switch_context() in * arch/arm64/mm/context.c. + * + * Unlike ASID allocator, we set the active_vmids to + * VMID_ACTIVE_INVALID on vCPU schedule out to avoid + * reserving the VMID space needlessly on rollover. + * Hence explicitly check here for a "!= 0" to + * handle the sync with a concurrent rollover. */ old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids)); - if (old_active_vmid && vmid_gen_match(vmid) && - atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids), - old_active_vmid, vmid)) + if (old_active_vmid != 0 && vmid_gen_match(vmid) && + 0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids), + old_active_vmid, vmid)) return; raw_spin_lock_irqsave(&cpu_vmid_lock, flags); From cc94d47ce16d4147d546e47c8248e8bd12ba5fe5 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:54 -0800 Subject: [PATCH 17/39] kvm: selftests: aarch64: fix assert in gicv3_access_reg The val argument in gicv3_access_reg can have any value when used for a read, not necessarily 0. Fix the assert by checking val only for writes. 
Signed-off-by: Ricardo Koller Reported-by: Reiji Watanabe Cc: Andrew Jones Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127030858.3269036-2-ricarkol@google.com --- tools/testing/selftests/kvm/lib/aarch64/gic_v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c index 00f613c0583c..e4945fe66620 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c @@ -159,7 +159,7 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset, uint32_t cpu_or_dist; GUEST_ASSERT(bits_per_field <= reg_bits); - GUEST_ASSERT(*val < (1U << bits_per_field)); + GUEST_ASSERT(!write || *val < (1U << bits_per_field)); /* Some registers like IROUTER are 64 bit long. Those are currently not * supported by readl nor writel, so just asserting here until then. */ From 11024a7a0ac26dd31ddfa0f6590e158bdf9ab858 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:55 -0800 Subject: [PATCH 18/39] kvm: selftests: aarch64: pass vgic_irq guest args as a pointer The guest in vgic_irq gets its arguments in a struct. This struct used to fit nicely in a single register so vcpu_args_set() was able to pass it by value by setting x0 with it. Unfortunately, this args struct grew after some commits and some guest args became random (specically kvm_supports_irqfd). Fix this by passing the guest args as a pointer (after allocating some guest memory for it). Signed-off-by: Ricardo Koller Reported-by: Reiji Watanabe Cc: Andrew Jones Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127030858.3269036-3-ricarkol@google.com --- .../testing/selftests/kvm/aarch64/vgic_irq.c | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index e6c7d7f8fbd1..b701eb80128d 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -472,10 +472,10 @@ static void test_restore_active(struct test_args *args, struct kvm_inject_desc * guest_restore_active(args, MIN_SPI, 4, f->cmd); } -static void guest_code(struct test_args args) +static void guest_code(struct test_args *args) { - uint32_t i, nr_irqs = args.nr_irqs; - bool level_sensitive = args.level_sensitive; + uint32_t i, nr_irqs = args->nr_irqs; + bool level_sensitive = args->level_sensitive; struct kvm_inject_desc *f, *inject_fns; gic_init(GIC_V3, 1, dist, redist); @@ -484,11 +484,11 @@ static void guest_code(struct test_args args) gic_irq_enable(i); for (i = MIN_SPI; i < nr_irqs; i++) - gic_irq_set_config(i, !args.level_sensitive); + gic_irq_set_config(i, !level_sensitive); - gic_set_eoi_split(args.eoi_split); + gic_set_eoi_split(args->eoi_split); - reset_priorities(&args); + reset_priorities(args); gic_set_priority_mask(CPU_PRIO_MASK); inject_fns = level_sensitive ? inject_level_fns @@ -497,17 +497,17 @@ static void guest_code(struct test_args args) local_irq_enable(); /* Start the tests. */ - for_each_supported_inject_fn(&args, inject_fns, f) { - test_injection(&args, f); - test_preemption(&args, f); - test_injection_failure(&args, f); + for_each_supported_inject_fn(args, inject_fns, f) { + test_injection(args, f); + test_preemption(args, f); + test_injection_failure(args, f); } /* Restore the active state of IRQs. 
This would happen when live * migrating IRQs in the middle of being handled. */ - for_each_supported_activate_fn(&args, set_active_fns, f) - test_restore_active(&args, f); + for_each_supported_activate_fn(args, set_active_fns, f) + test_restore_active(args, f); GUEST_DONE(); } @@ -739,6 +739,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) int gic_fd; struct kvm_vm *vm; struct kvm_inject_args inject_args; + vm_vaddr_t args_gva; struct test_args args = { .nr_irqs = nr_irqs, @@ -757,7 +758,9 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) vcpu_init_descriptor_tables(vm, VCPU_ID); /* Setup the guest args page (so it gets the args). */ - vcpu_args_set(vm, 0, 1, args); + args_gva = vm_vaddr_alloc_page(vm); + memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args)); + vcpu_args_set(vm, 0, 1, args_gva); gic_fd = vgic_v3_setup(vm, 1, nr_irqs, GICD_BASE_GPA, GICR_BASE_GPA); From 5b7898648f02083012900e48d063e51ccbdad165 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:56 -0800 Subject: [PATCH 19/39] kvm: selftests: aarch64: fix the failure check in kvm_set_gsi_routing_irqchip_check kvm_set_gsi_routing_irqchip_check(expect_failure=true) is used to check the error code returned by the kernel when trying to setup an invalid gsi routing table. The ioctl fails if "pin >= KVM_IRQCHIP_NUM_PINS", so kvm_set_gsi_routing_irqchip_check() should test the error only when "intid >= KVM_IRQCHIP_NUM_PINS+32". The issue is that the test check is "intid >= KVM_IRQCHIP_NUM_PINS", so for a case like "intid = KVM_IRQCHIP_NUM_PINS" the test wrongly assumes that the kernel will return an error. Fix this by using the right check. Signed-off-by: Ricardo Koller Reported-by: Reiji Watanabe Cc: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127030858.3269036-4-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index b701eb80128d..0106fc464afe 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -573,8 +573,8 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm, kvm_gsi_routing_write(vm, routing); } else { ret = _kvm_gsi_routing_write(vm, routing); - /* The kernel only checks for KVM_IRQCHIP_NUM_PINS. */ - if (intid >= KVM_IRQCHIP_NUM_PINS) + /* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */ + if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS) TEST_ASSERT(ret != 0 && errno == EINVAL, "Bad intid %u did not cause KVM_SET_GSI_ROUTING " "error: rc: %i errno: %i", intid, ret, errno); From a5cd38fd9c47b23abc6df08d6ee6a71b39038185 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:57 -0800 Subject: [PATCH 20/39] kvm: selftests: aarch64: fix some vgic related comments Fix the formatting of some comments and the wording of one of them (in gicv3_access_reg). 
Signed-off-by: Ricardo Koller Reported-by: Reiji Watanabe Cc: Andrew Jones Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127030858.3269036-5-ricarkol@google.com --- tools/testing/selftests/kvm/aarch64/vgic_irq.c | 12 ++++++++---- tools/testing/selftests/kvm/lib/aarch64/gic_v3.c | 10 ++++++---- tools/testing/selftests/kvm/lib/aarch64/vgic.c | 3 ++- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c index 0106fc464afe..f0230711fbe9 100644 --- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c +++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c @@ -306,7 +306,8 @@ static void guest_restore_active(struct test_args *args, uint32_t prio, intid, ap1r; int i; - /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs + /* + * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs * in descending order, so intid+1 can preempt intid. */ for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) { @@ -315,7 +316,8 @@ static void guest_restore_active(struct test_args *args, gic_set_priority(intid, prio); } - /* In a real migration, KVM would restore all GIC state before running + /* + * In a real migration, KVM would restore all GIC state before running * guest code. */ for (i = 0; i < num; i++) { @@ -503,7 +505,8 @@ static void guest_code(struct test_args *args) test_injection_failure(args, f); } - /* Restore the active state of IRQs. This would happen when live + /* + * Restore the active state of IRQs. This would happen when live * migrating IRQs in the middle of being handled. */ for_each_supported_activate_fn(args, set_active_fns, f) @@ -840,7 +843,8 @@ int main(int argc, char **argv) } } - /* If the user just specified nr_irqs and/or gic_version, then run all + /* + * If the user just specified nr_irqs and/or gic_version, then run all * combinations. */ if (default_args) { diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c index e4945fe66620..263bf3ed8fd5 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c @@ -19,7 +19,7 @@ struct gicv3_data { unsigned int nr_spis; }; -#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) +#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) #define DIST_BIT (1U << 31) enum gicv3_intid_range { @@ -105,7 +105,8 @@ static void gicv3_set_eoi_split(bool split) { uint32_t val; - /* All other fields are read-only, so no need to read CTLR first. In + /* + * All other fields are read-only, so no need to read CTLR first. In * fact, the kernel does the same. */ val = split ? (1U << 1) : 0; @@ -160,8 +161,9 @@ static void gicv3_access_reg(uint32_t intid, uint64_t offset, GUEST_ASSERT(bits_per_field <= reg_bits); GUEST_ASSERT(!write || *val < (1U << bits_per_field)); - /* Some registers like IROUTER are 64 bit long. Those are currently not - * supported by readl nor writel, so just asserting here until then. + /* + * This function does not support 64 bit accesses. Just asserting here + * until we implement readq/writeq. 
*/ GUEST_ASSERT(reg_bits == 32); diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c index b3a0fca0d780..79864b941617 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c @@ -150,7 +150,8 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, attr += SZ_64K; } - /* All calls will succeed, even with invalid intid's, as long as the + /* + * All calls will succeed, even with invalid intid's, as long as the * addr part of the attr is within 32 bits (checked above). An invalid * intid will just make the read/writes point to above the intended * register space (i.e., ICPENDR after ISPENDR). From b53de63a89244c196d8a2ea76b6754e3fdb4b626 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Jan 2022 19:08:58 -0800 Subject: [PATCH 21/39] kvm: selftests: aarch64: use a tighter assert in vgic_poke_irq() vgic_poke_irq() checks that the attr argument passed to the vgic device ioctl is sane. Make this check tighter by moving it to after the last attr update. Signed-off-by: Ricardo Koller Reported-by: Reiji Watanabe Cc: Andrew Jones Reviewed-by: Andrew Jones Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127030858.3269036-6-ricarkol@google.com --- tools/testing/selftests/kvm/lib/aarch64/vgic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c index 79864b941617..f365c32a7296 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c @@ -138,9 +138,6 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, uint64_t val; bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid); - /* Check that the addr part of the attr is within 32 bits. */ - assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK); - uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS : KVM_DEV_ARM_VGIC_GRP_DIST_REGS; @@ -150,6 +147,9 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid, attr += SZ_64K; } + /* Check that the addr part of the attr is within 32 bits. */ + assert((attr & ~KVM_DEV_ARM_VGIC_OFFSET_MASK) == 0); + /* * All calls will succeed, even with invalid intid's, as long as the * addr part of the attr is within 32 bits (checked above). An invalid From dfefa04a90cf9a20090cfa096153d64f95b7e33f Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 8 Feb 2022 01:27:05 +0000 Subject: [PATCH 22/39] KVM: arm64: Drop unused param from kvm_psci_version() kvm_psci_version() consumes a pointer to struct kvm in addition to a vcpu pointer. Drop the kvm pointer as it is unused. While the comment suggests the explicit kvm pointer was useful for calling from hyp, there exist no such callsite in hyp. 
Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220208012705.640444-1-oupton@google.com --- arch/arm64/kvm/psci.c | 6 +++--- include/kvm/arm_psci.h | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 3eae32876897..a0c10c11f40e 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -85,7 +85,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) if (!vcpu) return PSCI_RET_INVALID_PARAMS; if (!vcpu->arch.power_off) { - if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1) + if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) return PSCI_RET_ALREADY_ON; else return PSCI_RET_INVALID_PARAMS; @@ -392,7 +392,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) */ int kvm_psci_call(struct kvm_vcpu *vcpu) { - switch (kvm_psci_version(vcpu, vcpu->kvm)) { + switch (kvm_psci_version(vcpu)) { case KVM_ARM_PSCI_1_0: return kvm_psci_1_0_call(vcpu); case KVM_ARM_PSCI_0_2: @@ -471,7 +471,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) switch (reg->id) { case KVM_REG_ARM_PSCI_VERSION: - val = kvm_psci_version(vcpu, vcpu->kvm); + val = kvm_psci_version(vcpu); break; case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 5b58bd2fe088..297645edcaff 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -16,11 +16,7 @@ #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 -/* - * We need the KVM pointer independently from the vcpu as we can call - * this from HYP, and need to apply kern_hyp_va on it... - */ -static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) +static inline int kvm_psci_version(struct kvm_vcpu *vcpu) { /* * Our PSCI implementation stays the same across versions from From 4c68d6c0a1757139c791ccf1a781cbd81e35a063 Mon Sep 17 00:00:00 2001 From: Keir Fraser Date: Mon, 31 Jan 2022 12:40:53 +0000 Subject: [PATCH 23/39] KVM: arm64: pkvm: Implement CONFIG_DEBUG_LIST at EL2 Currently the check functions are stubbed out at EL2. Implement versions suitable for the constrained EL2 environment. 
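
For context, here is a lightly trimmed excerpt of the generic helper in
include/linux/list.h that ends up calling one of these hooks. With
CONFIG_DEBUG_LIST enabled, hyp code that reuses the kernel's list
primitives now gets real corruption checks at EL2 instead of the stubs
(the excerpt is illustrative only, not part of this patch):

  static inline void __list_add(struct list_head *new,
                                struct list_head *prev,
                                struct list_head *next)
  {
          /* Resolves to the EL2 implementation added below */
          if (!__list_add_valid(new, prev, next))
                  return;

          next->prev = new;
          new->next = next;
          new->prev = prev;
          WRITE_ONCE(prev->next, new);
  }
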
Signed-off-by: Keir Fraser Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220131124114.3103337-1-keirf@google.com --- arch/arm64/kvm/hyp/nvhe/Makefile | 3 +- arch/arm64/kvm/hyp/nvhe/list_debug.c | 54 ++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/stub.c | 22 ------------ 3 files changed, 56 insertions(+), 23 deletions(-) create mode 100644 arch/arm64/kvm/hyp/nvhe/list_debug.c delete mode 100644 arch/arm64/kvm/hyp/nvhe/stub.c diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 24b2c2425b38..f9fe4dc21b1f 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -13,10 +13,11 @@ lib-objs := clear_page.o copy_page.o memcpy.o memset.o lib-objs := $(addprefix ../../../lib/, $(lib-objs)) obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ - hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \ + hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \ cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o +obj-$(CONFIG_DEBUG_LIST) += list_debug.o obj-y += $(lib-objs) ## diff --git a/arch/arm64/kvm/hyp/nvhe/list_debug.c b/arch/arm64/kvm/hyp/nvhe/list_debug.c new file mode 100644 index 000000000000..d68abd7ea124 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/list_debug.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 - Google LLC + * Author: Keir Fraser + */ + +#include +#include + +static inline __must_check bool nvhe_check_data_corruption(bool v) +{ + return v; +} + +#define NVHE_CHECK_DATA_CORRUPTION(condition) \ + nvhe_check_data_corruption(({ \ + bool corruption = unlikely(condition); \ + if (corruption) { \ + if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ + BUG_ON(1); \ + } else \ + WARN_ON(1); \ + } \ + corruption; \ + })) + +/* The predicates checked here are taken from lib/list_debug.c. */ + +bool __list_add_valid(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + if (NVHE_CHECK_DATA_CORRUPTION(next->prev != prev) || + NVHE_CHECK_DATA_CORRUPTION(prev->next != next) || + NVHE_CHECK_DATA_CORRUPTION(new == prev || new == next)) + return false; + + return true; +} + +bool __list_del_entry_valid(struct list_head *entry) +{ + struct list_head *prev, *next; + + prev = entry->prev; + next = entry->next; + + if (NVHE_CHECK_DATA_CORRUPTION(next == LIST_POISON1) || + NVHE_CHECK_DATA_CORRUPTION(prev == LIST_POISON2) || + NVHE_CHECK_DATA_CORRUPTION(prev->next != entry) || + NVHE_CHECK_DATA_CORRUPTION(next->prev != entry)) + return false; + + return true; +} diff --git a/arch/arm64/kvm/hyp/nvhe/stub.c b/arch/arm64/kvm/hyp/nvhe/stub.c deleted file mode 100644 index c0aa6bbfd79d..000000000000 --- a/arch/arm64/kvm/hyp/nvhe/stub.c +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Stubs for out-of-line function calls caused by re-using kernel - * infrastructure at EL2. 
- * - * Copyright (C) 2020 - Google LLC - */ - -#include - -#ifdef CONFIG_DEBUG_LIST -bool __list_add_valid(struct list_head *new, struct list_head *prev, - struct list_head *next) -{ - return true; -} - -bool __list_del_entry_valid(struct list_head *entry) -{ - return true; -} -#endif From 5177fe91e4cf78a659aada2c9cf712db4d788481 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 27 Jan 2022 16:17:54 +0000 Subject: [PATCH 24/39] KVM: arm64: Do not change the PMU event filter after a VCPU has run Userspace can specify which events a guest is allowed to use with the KVM_ARM_VCPU_PMU_V3_FILTER attribute. The list of allowed events can be identified by a guest from reading the PMCEID{0,1}_EL0 registers. Changing the PMU event filter after a VCPU has run can cause reads of the registers performed before the filter is changed to return different values than reads performed with the new event filter in place. The architecture defines the two registers as read-only, and this behaviour contradicts that. Keep track when the first VCPU has run and deny changes to the PMU event filter to prevent this from happening. Signed-off-by: Marc Zyngier [ Alexandru E: Added commit message, updated ioctl documentation ] Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127161759.53553-2-alexandru.elisei@arm.com --- Documentation/virt/kvm/devices/vcpu.rst | 2 +- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 4 +++ arch/arm64/kvm/pmu-emul.c | 33 +++++++++++++++---------- 4 files changed, 26 insertions(+), 14 deletions(-) diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst index 60a29972d3f1..d063aaee5bb7 100644 --- a/Documentation/virt/kvm/devices/vcpu.rst +++ b/Documentation/virt/kvm/devices/vcpu.rst @@ -70,7 +70,7 @@ irqchip. 
-ENODEV PMUv3 not supported or GIC not initialized -ENXIO PMUv3 not properly configured or in-kernel irqchip not configured as required prior to calling this attribute - -EBUSY PMUv3 already initialized + -EBUSY PMUv3 already initialized or a VCPU has already run -EINVAL Invalid filter range ======= ====================================================== diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 5bc01e62c08a..2869259e10c0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -136,6 +136,7 @@ struct kvm_arch { /* Memory Tagging Extension enabled for the guest */ bool mte_enabled; + bool ran_once; }; struct kvm_vcpu_fault_info { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ecc5958e27fe..4783dbf66df2 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -634,6 +634,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) if (kvm_vm_is_protected(kvm)) kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); + mutex_lock(&kvm->lock); + kvm->arch.ran_once = true; + mutex_unlock(&kvm->lock); + return ret; } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index fbcfd4ec6f92..bc771bc1a041 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -924,6 +924,8 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { + struct kvm *kvm = vcpu->kvm; + if (!kvm_vcpu_has_pmu(vcpu)) return -ENODEV; @@ -941,7 +943,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) int __user *uaddr = (int __user *)(long)attr->addr; int irq; - if (!irqchip_in_kernel(vcpu->kvm)) + if (!irqchip_in_kernel(kvm)) return -EINVAL; if (get_user(irq, uaddr)) @@ -951,7 +953,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) if (!(irq_is_ppi(irq) || irq_is_spi(irq))) return -EINVAL; - if (!pmu_irq_is_valid(vcpu->kvm, irq)) + if (!pmu_irq_is_valid(kvm, irq)) return -EINVAL; if (kvm_arm_pmu_irq_initialized(vcpu)) @@ -966,7 +968,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) struct kvm_pmu_event_filter filter; int nr_events; - nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1; + nr_events = kvm_pmu_event_mask(kvm) + 1; uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr; @@ -978,12 +980,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) filter.action != KVM_PMU_EVENT_DENY)) return -EINVAL; - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&kvm->lock); - if (!vcpu->kvm->arch.pmu_filter) { - vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT); - if (!vcpu->kvm->arch.pmu_filter) { - mutex_unlock(&vcpu->kvm->lock); + if (kvm->arch.ran_once) { + mutex_unlock(&kvm->lock); + return -EBUSY; + } + + if (!kvm->arch.pmu_filter) { + kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT); + if (!kvm->arch.pmu_filter) { + mutex_unlock(&kvm->lock); return -ENOMEM; } @@ -994,17 +1001,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) * events, the default is to allow. 
*/ if (filter.action == KVM_PMU_EVENT_ALLOW) - bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events); + bitmap_zero(kvm->arch.pmu_filter, nr_events); else - bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events); + bitmap_fill(kvm->arch.pmu_filter, nr_events); } if (filter.action == KVM_PMU_EVENT_ALLOW) - bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); + bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents); else - bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents); + bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents); - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&kvm->lock); return 0; } From 2093057ab879da1070c851b9e07126eaa86d0dfc Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Thu, 27 Jan 2022 16:17:55 +0000 Subject: [PATCH 25/39] perf: Fix wrong name in comment for struct perf_cpu_context Commit 0793a61d4df8 ("performance counters: core code") added the perf subsystem (then called Performance Counters) to Linux, creating the struct perf_cpu_context. The comment for the struct referred to it as a "struct perf_counter_cpu_context". Commit cdd6c482c9ff ("perf: Do the big rename: Performance Counters -> Performance Events") changed the comment to refer to a "struct perf_event_cpu_context", which was still the wrong name for the struct. Change the comment to say "struct perf_cpu_context". CC: Thomas Gleixner CC: Ingo Molnar Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127161759.53553-3-alexandru.elisei@arm.com --- include/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 733649184b27..af97dd427501 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -864,7 +864,7 @@ struct perf_event_context { #define PERF_NR_CONTEXTS 4 /** - * struct perf_event_cpu_context - per cpu event context structure + * struct perf_cpu_context - per cpu event context structure */ struct perf_cpu_context { struct perf_event_context ctx; From 46b18782147248b62f00e98a7f87abaf934951e8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 27 Jan 2022 16:17:56 +0000 Subject: [PATCH 26/39] KVM: arm64: Keep a per-VM pointer to the default PMU As we are about to allow selection of the PMU exposed to a guest, start by keeping track of the default one instead of only the PMU version. Signed-off-by: Marc Zyngier Signed-off-by: Alexandru Elisei Link: https://lore.kernel.org/r/20220127161759.53553-4-alexandru.elisei@arm.com --- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kvm/pmu-emul.c | 42 +++++++++++++++++++------------ 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2869259e10c0..57141a3a3740 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -129,7 +129,7 @@ struct kvm_arch { * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+). 
*/ unsigned long *pmu_filter; - unsigned int pmuver; + struct arm_pmu *arm_pmu; u8 pfr0_csv2; u8 pfr0_csv3; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index bc771bc1a041..b238b3d5515c 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -24,7 +24,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc); static u32 kvm_pmu_event_mask(struct kvm *kvm) { - switch (kvm->arch.pmuver) { + unsigned int pmuver; + + pmuver = kvm->arch.arm_pmu->pmuver; + + switch (pmuver) { case ID_AA64DFR0_PMUVER_8_0: return GENMASK(9, 0); case ID_AA64DFR0_PMUVER_8_1: @@ -33,7 +37,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) case ID_AA64DFR0_PMUVER_8_7: return GENMASK(15, 0); default: /* Shouldn't be here, just for sanity */ - WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); + WARN_ONCE(1, "Unknown PMU version %d\n", pmuver); return 0; } } @@ -600,6 +604,7 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx) */ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx) { + struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu; struct kvm_pmu *pmu = &vcpu->arch.pmu; struct kvm_pmc *pmc; struct perf_event *event; @@ -636,7 +641,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx) return; memset(&attr, 0, sizeof(struct perf_event_attr)); - attr.type = PERF_TYPE_RAW; + attr.type = arm_pmu->pmu.type; attr.size = sizeof(attr); attr.pinned = 1; attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx); @@ -750,12 +755,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) static_branch_enable(&kvm_arm_pmu_available); } -static int kvm_pmu_probe_pmuver(void) +static struct arm_pmu *kvm_pmu_probe_armpmu(void) { struct perf_event_attr attr = { }; struct perf_event *event; - struct arm_pmu *pmu; - int pmuver = ID_AA64DFR0_PMUVER_IMP_DEF; + struct arm_pmu *pmu = NULL; /* * Create a dummy event that only counts user cycles. 
As we'll never @@ -780,19 +784,20 @@ static int kvm_pmu_probe_pmuver(void) if (IS_ERR(event)) { pr_err_once("kvm: pmu event creation failed %ld\n", PTR_ERR(event)); - return ID_AA64DFR0_PMUVER_IMP_DEF; + return NULL; } if (event->pmu) { pmu = to_arm_pmu(event->pmu); - if (pmu->pmuver) - pmuver = pmu->pmuver; + if (pmu->pmuver == 0 || + pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) + pmu = NULL; } perf_event_disable(event); perf_event_release_kernel(event); - return pmuver; + return pmu; } u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) @@ -810,7 +815,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled * as RAZ */ - if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4) + if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4) val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32); base = 32; } @@ -932,11 +937,16 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) if (vcpu->arch.pmu.created) return -EBUSY; - if (!vcpu->kvm->arch.pmuver) - vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver(); - - if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF) - return -ENODEV; + mutex_lock(&kvm->lock); + if (!kvm->arch.arm_pmu) { + /* No PMU set, get the default one */ + kvm->arch.arm_pmu = kvm_pmu_probe_armpmu(); + if (!kvm->arch.arm_pmu) { + mutex_unlock(&kvm->lock); + return -ENODEV; + } + } + mutex_unlock(&kvm->lock); switch (attr->attr) { case KVM_ARM_VCPU_PMU_V3_IRQ: { From db858060b1a788fba03711793dcaff19ea43286c Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Thu, 27 Jan 2022 16:17:57 +0000 Subject: [PATCH 27/39] KVM: arm64: Keep a list of probed PMUs The ARM PMU driver calls kvm_host_pmu_init() after probing to tell KVM that a hardware PMU is available for guest emulation. Heterogeneous systems can have more than one PMU present, and the callback gets called multiple times, once for each of them. Keep track of all the PMUs available to KVM, as they're going to be needed later. 
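
For reference, a rough sketch of the caller side, assuming the callback
is invoked from armpmu_register() in drivers/perf/arm_pmu.c (heavily
simplified; error handling and unrelated setup elided):

  int armpmu_register(struct arm_pmu *pmu)
  {
          int ret;

          ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
          if (ret)
                  return ret;

          /* Called once per probed PMU; KVM records each instance */
          kvm_host_pmu_init(pmu);

          return 0;
  }
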
Reviewed-by: Reiji Watanabe Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127161759.53553-5-alexandru.elisei@arm.com --- arch/arm64/kvm/pmu-emul.c | 25 +++++++++++++++++++++++-- include/kvm/arm_pmu.h | 5 +++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index b238b3d5515c..7bab73f85b58 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -16,6 +17,9 @@ DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available); +static LIST_HEAD(arm_pmus); +static DEFINE_MUTEX(arm_pmus_lock); + static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx); static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx); static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc); @@ -750,9 +754,26 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, void kvm_host_pmu_init(struct arm_pmu *pmu) { - if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF && - !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled()) + struct arm_pmu_entry *entry; + + if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF || + is_protected_kvm_enabled()) + return; + + mutex_lock(&arm_pmus_lock); + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + goto out_unlock; + + entry->arm_pmu = pmu; + list_add_tail(&entry->entry, &arm_pmus); + + if (list_is_singular(&arm_pmus)) static_branch_enable(&kvm_arm_pmu_available); + +out_unlock: + mutex_unlock(&arm_pmus_lock); } static struct arm_pmu *kvm_pmu_probe_armpmu(void) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index f9ed4c171d7b..20193416d214 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -29,6 +29,11 @@ struct kvm_pmu { struct irq_work overflow_work; }; +struct arm_pmu_entry { + struct list_head entry; + struct arm_pmu *arm_pmu; +}; + DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available); static __always_inline bool kvm_arm_support_pmu_v3(void) From 6ee7fca2a4a023b14aa1f1f3c4f6c833116116ef Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Thu, 27 Jan 2022 16:17:58 +0000 Subject: [PATCH 28/39] KVM: arm64: Add KVM_ARM_VCPU_PMU_V3_SET_PMU attribute When KVM creates an event and there are more than one PMUs present on the system, perf_init_event() will go through the list of available PMUs and will choose the first one that can create the event. The order of the PMUs in this list depends on the probe order, which can change under various circumstances, for example if the order of the PMU nodes change in the DTB or if asynchronous driver probing is enabled on the kernel command line (with the driver_async_probe=armv8-pmu option). Another consequence of this approach is that on heteregeneous systems all virtual machines that KVM creates will use the same PMU. This might cause unexpected behaviour for userspace: when a VCPU is executing on the physical CPU that uses this default PMU, PMU events in the guest work correctly; but when the same VCPU executes on another CPU, PMU events in the guest will suddenly stop counting. Fortunately, perf core allows user to specify on which PMU to create an event by using the perf_event_attr->type field, which is used by perf_init_event() as an index in the radix tree of available PMUs. 
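
As a minimal illustration of how userspace is expected to use the
attribute added by this patch (hypothetical VMM boilerplate, not part of
the patch): the integer passed in is the PMU's perf type id, read from
the "type" file of the desired PMU instance in sysfs.

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int vcpu_set_pmu(int vcpu_fd, int pmu_type)
  {
          struct kvm_device_attr attr = {
                  .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                  .attr  = KVM_ARM_VCPU_PMU_V3_SET_PMU,
                  /* e.g. from /sys/bus/event_source/devices/<pmu>/type */
                  .addr  = (__u64)(unsigned long)&pmu_type,
          };

          return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
  }
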
Add the KVM_ARM_VCPU_PMU_V3_CTRL(KVM_ARM_VCPU_PMU_V3_SET_PMU) VCPU attribute to allow userspace to specify the arm_pmu that KVM will use when creating events for that VCPU. KVM will make no attempt to run the VCPU on the physical CPUs that share the PMU, leaving it up to userspace to manage the VCPU threads' affinity accordingly. To ensure that KVM doesn't expose an asymmetric system to the guest, the PMU set for one VCPU will be used by all other VCPUs. Once a VCPU has run, the PMU cannot be changed in order to avoid changing the list of available events for a VCPU, or to change the semantics of existing events. Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127161759.53553-6-alexandru.elisei@arm.com --- Documentation/virt/kvm/devices/vcpu.rst | 28 +++++++++++++++++ arch/arm64/include/uapi/asm/kvm.h | 1 + arch/arm64/kvm/pmu-emul.c | 40 +++++++++++++++++++++++++ tools/arch/arm64/include/uapi/asm/kvm.h | 1 + 4 files changed, 70 insertions(+) diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst index d063aaee5bb7..e8c5770590a2 100644 --- a/Documentation/virt/kvm/devices/vcpu.rst +++ b/Documentation/virt/kvm/devices/vcpu.rst @@ -104,6 +104,34 @@ hardware event. Filtering event 0x1E (CHAIN) has no effect either, as it isn't strictly speaking an event. Filtering the cycle counter is possible using event 0x11 (CPU_CYCLES). +1.4 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_SET_PMU +------------------------------------------ + +:Parameters: in kvm_device_attr.addr the address to an int representing the PMU + identifier. + +:Returns: + + ======= ==================================================== + -EBUSY PMUv3 already initialized, a VCPU has already run or + an event filter has already been set + -EFAULT Error accessing the PMU identifier + -ENXIO PMU not found + -ENODEV PMUv3 not supported or GIC not initialized + -ENOMEM Could not allocate memory + ======= ==================================================== + +Request that the VCPU uses the specified hardware PMU when creating guest events +for the purpose of PMU emulation. The PMU identifier can be read from the "type" +file for the desired PMU instance under /sys/devices (or, equivalent, +/sys/bus/even_source). This attribute is particularly useful on heterogeneous +systems where there are at least two CPU PMUs on the system. The PMU that is set +for one VCPU will be used by all the other VCPUs. It isn't possible to set a PMU +if a PMU event filter is already present. + +Note that KVM will not make any attempts to run the VCPU on the physical CPUs +associated with the PMU specified by this attribute. This is entirely left to +userspace. 2. 
GROUP: KVM_ARM_VCPU_TIMER_CTRL ================================= diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index b3edde68bc3e..1d0a0a2a9711 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -362,6 +362,7 @@ struct kvm_arm_copy_mte_tags { #define KVM_ARM_VCPU_PMU_V3_IRQ 0 #define KVM_ARM_VCPU_PMU_V3_INIT 1 #define KVM_ARM_VCPU_PMU_V3_FILTER 2 +#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 7bab73f85b58..18361f367495 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -948,6 +948,36 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) return true; } +static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) +{ + struct kvm *kvm = vcpu->kvm; + struct arm_pmu_entry *entry; + struct arm_pmu *arm_pmu; + int ret = -ENXIO; + + mutex_lock(&kvm->lock); + mutex_lock(&arm_pmus_lock); + + list_for_each_entry(entry, &arm_pmus, entry) { + arm_pmu = entry->arm_pmu; + if (arm_pmu->pmu.type == pmu_id) { + if (kvm->arch.ran_once || + (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) { + ret = -EBUSY; + break; + } + + kvm->arch.arm_pmu = arm_pmu; + ret = 0; + break; + } + } + + mutex_unlock(&arm_pmus_lock); + mutex_unlock(&kvm->lock); + return ret; +} + int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { struct kvm *kvm = vcpu->kvm; @@ -1046,6 +1076,15 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return 0; } + case KVM_ARM_VCPU_PMU_V3_SET_PMU: { + int __user *uaddr = (int __user *)(long)attr->addr; + int pmu_id; + + if (get_user(pmu_id, uaddr)) + return -EFAULT; + + return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id); + } case KVM_ARM_VCPU_PMU_V3_INIT: return kvm_arm_pmu_v3_init(vcpu); } @@ -1083,6 +1122,7 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) case KVM_ARM_VCPU_PMU_V3_IRQ: case KVM_ARM_VCPU_PMU_V3_INIT: case KVM_ARM_VCPU_PMU_V3_FILTER: + case KVM_ARM_VCPU_PMU_V3_SET_PMU: if (kvm_vcpu_has_pmu(vcpu)) return 0; } diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index b3edde68bc3e..1d0a0a2a9711 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -362,6 +362,7 @@ struct kvm_arm_copy_mte_tags { #define KVM_ARM_VCPU_PMU_V3_IRQ 0 #define KVM_ARM_VCPU_PMU_V3_INIT 1 #define KVM_ARM_VCPU_PMU_V3_FILTER 2 +#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 From 583cda1b0e7d5d49db5fc15db623166310e36bf6 Mon Sep 17 00:00:00 2001 From: Alexandru Elisei Date: Thu, 27 Jan 2022 16:17:59 +0000 Subject: [PATCH 29/39] KVM: arm64: Refuse to run VCPU if the PMU doesn't match the physical CPU Userspace can assign a PMU to a VCPU with the KVM_ARM_VCPU_PMU_V3_SET_PMU device ioctl. If the VCPU is scheduled on a physical CPU which has a different PMU, the perf events needed to emulate a guest PMU won't be scheduled in and the guest performance counters will stop counting. Treat it as an userspace error and refuse to run the VCPU in this situation. 
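
As a hedged sketch of the userspace side (names and error handling are
illustrative, not part of this patch), a VMM can detect the new exit
after KVM_RUN returns and either re-pin the vcpu thread or bail out:

  #include <stdio.h>
  #include <linux/kvm.h>

  /* Returns 0 if this was the PMU/CPU mismatch exit, -1 otherwise. */
  static int handle_unsupported_cpu(struct kvm_run *run)
  {
          if (run->exit_reason == KVM_EXIT_FAIL_ENTRY &&
              run->fail_entry.hardware_entry_failure_reason ==
                              KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED) {
                  fprintf(stderr, "vcpu ran on CPU %u without the selected PMU\n",
                          run->fail_entry.cpu);
                  /* Re-pin the vcpu thread to a supported CPU and retry. */
                  return 0;
          }

          return -1;
  }
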
Suggested-by: Marc Zyngier Signed-off-by: Alexandru Elisei Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220127161759.53553-7-alexandru.elisei@arm.com --- Documentation/virt/kvm/devices/vcpu.rst | 6 +++++- arch/arm64/include/asm/kvm_host.h | 12 ++++++++++++ arch/arm64/include/uapi/asm/kvm.h | 3 +++ arch/arm64/kvm/arm.c | 17 +++++++++++++++++ arch/arm64/kvm/pmu-emul.c | 1 + 5 files changed, 38 insertions(+), 1 deletion(-) diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst index e8c5770590a2..260db203a1e2 100644 --- a/Documentation/virt/kvm/devices/vcpu.rst +++ b/Documentation/virt/kvm/devices/vcpu.rst @@ -131,7 +131,11 @@ if a PMU event filter is already present. Note that KVM will not make any attempts to run the VCPU on the physical CPUs associated with the PMU specified by this attribute. This is entirely left to -userspace. +userspace. However, attempting to run the VCPU on a physical CPU not supported +by the PMU will fail and KVM_RUN will return with +exit_reason = KVM_EXIT_FAIL_ENTRY and populate the fail_entry struct by setting +hardare_entry_failure_reason field to KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED and +the cpu field to the processor id. 2. GROUP: KVM_ARM_VCPU_TIMER_CTRL ================================= diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 57141a3a3740..0bed0e33e1ae 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -131,6 +131,8 @@ struct kvm_arch { unsigned long *pmu_filter; struct arm_pmu *arm_pmu; + cpumask_var_t supported_cpus; + u8 pfr0_csv2; u8 pfr0_csv3; @@ -436,6 +438,7 @@ struct kvm_vcpu_arch { #define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */ #define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */ #define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14) +#define KVM_ARM64_ON_UNSUPPORTED_CPU (1 << 15) /* Physical CPU not in supported_cpus */ #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | \ @@ -454,6 +457,15 @@ struct kvm_vcpu_arch { #define vcpu_has_ptrauth(vcpu) false #endif +#define vcpu_on_unsupported_cpu(vcpu) \ + ((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU) + +#define vcpu_set_on_unsupported_cpu(vcpu) \ + ((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU) + +#define vcpu_clear_on_unsupported_cpu(vcpu) \ + ((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU) + #define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs) /* diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 1d0a0a2a9711..d49f714f48e6 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -414,6 +414,9 @@ struct kvm_arm_copy_mte_tags { #define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS #define KVM_PSCI_RET_DENIED PSCI_RET_DENIED +/* run->fail_entry.hardware_entry_failure_reason codes. 
*/ +#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0) + #endif #endif /* __ARM_KVM_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 4783dbf66df2..13c1318d8b9a 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -150,6 +150,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (ret) goto out_free_stage2_pgd; + if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) + goto out_free_stage2_pgd; + cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); + kvm_vgic_early_init(kvm); /* The maximum number of VCPUs is limited by the host's GIC model */ @@ -176,6 +180,7 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) void kvm_arch_destroy_vm(struct kvm *kvm) { bitmap_free(kvm->arch.pmu_filter); + free_cpumask_var(kvm->arch.supported_cpus); kvm_vgic_destroy(kvm); @@ -411,6 +416,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_disable(vcpu); kvm_arch_vcpu_load_debug_state_flags(vcpu); + + if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus)) + vcpu_set_on_unsupported_cpu(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -423,6 +431,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_vgic_put(vcpu); kvm_vcpu_pmu_restore_host(vcpu); + vcpu_clear_on_unsupported_cpu(vcpu); vcpu->cpu = -1; } @@ -796,6 +805,14 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret) } } + if (unlikely(vcpu_on_unsupported_cpu(vcpu))) { + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED; + run->fail_entry.cpu = smp_processor_id(); + *ret = 0; + return true; + } + return kvm_request_pending(vcpu) || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) || xfer_to_guest_mode_work_pending(); diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 18361f367495..4526a5824dac 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -968,6 +968,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) } kvm->arch.arm_pmu = arm_pmu; + cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus); ret = 0; break; } From 512865d83fd9685a4d5aab26f898737b57d3187e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 21 Feb 2022 15:35:22 +0000 Subject: [PATCH 30/39] KVM: arm64: Bump guest PSCI version to 1.1 Expose PSCI version v1.1 to the guest by default. The only difference for now is that an updated version number is reported by PSCI_VERSION. Cc: Marc Zyngier Cc: James Morse Cc: Alexandru Elisei Cc: Suzuki K Poulose Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220221153524.15397-2-will@kernel.org --- arch/arm64/kvm/psci.c | 12 +++++++++--- include/kvm/arm_psci.h | 3 ++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 3eae32876897..70d07477e712 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -305,16 +305,19 @@ out: return ret; } -static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu) +static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) { u32 psci_fn = smccc_get_function(vcpu); u32 feature; unsigned long val; int ret = 1; + if (minor > 1) + return -EINVAL; + switch(psci_fn) { case PSCI_0_2_FN_PSCI_VERSION: - val = KVM_ARM_PSCI_1_0; + val = minor == 0 ? 
KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1; break; case PSCI_1_0_FN_PSCI_FEATURES: feature = smccc_get_arg1(vcpu); @@ -393,8 +396,10 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) int kvm_psci_call(struct kvm_vcpu *vcpu) { switch (kvm_psci_version(vcpu, vcpu->kvm)) { + case KVM_ARM_PSCI_1_1: + return kvm_psci_1_x_call(vcpu, 1); case KVM_ARM_PSCI_1_0: - return kvm_psci_1_0_call(vcpu); + return kvm_psci_1_x_call(vcpu, 0); case KVM_ARM_PSCI_0_2: return kvm_psci_0_2_call(vcpu); case KVM_ARM_PSCI_0_1: @@ -511,6 +516,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) return 0; case KVM_ARM_PSCI_0_2: case KVM_ARM_PSCI_1_0: + case KVM_ARM_PSCI_1_1: if (!wants_02) return -EINVAL; vcpu->kvm->arch.psci_version = val; diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 5b58bd2fe088..4a1003323a0c 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -13,8 +13,9 @@ #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) #define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) +#define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1) -#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_1 /* * We need the KVM pointer independently from the vcpu as we can call From d43583b890e7cb0078d13d056753a56602b92406 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 21 Feb 2022 15:35:23 +0000 Subject: [PATCH 31/39] KVM: arm64: Expose PSCI SYSTEM_RESET2 call to the guest PSCI v1.1 introduces the optional SYSTEM_RESET2 call, which allows the caller to provide a vendor-specific "reset type" and "cookie" to request a particular form of reset or shutdown. Expose this call to the guest and handle it in the same way as PSCI SYSTEM_RESET, along with some basic range checking on the type argument. Cc: Marc Zyngier Cc: James Morse Cc: Alexandru Elisei Cc: Suzuki K Poulose Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220221153524.15397-3-will@kernel.org --- arch/arm64/kvm/psci.c | 33 +++++++++++++++++++++++++++++---- include/uapi/linux/psci.h | 4 ++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 70d07477e712..30fcc5a99483 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -308,7 +308,7 @@ out: static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) { u32 psci_fn = smccc_get_function(vcpu); - u32 feature; + u32 arg; unsigned long val; int ret = 1; @@ -320,12 +320,12 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) val = minor == 0 ? 
KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1; break; case PSCI_1_0_FN_PSCI_FEATURES: - feature = smccc_get_arg1(vcpu); - val = kvm_psci_check_allowed_function(vcpu, feature); + arg = smccc_get_arg1(vcpu); + val = kvm_psci_check_allowed_function(vcpu, arg); if (val) break; - switch(feature) { + switch(arg) { case PSCI_0_2_FN_PSCI_VERSION: case PSCI_0_2_FN_CPU_SUSPEND: case PSCI_0_2_FN64_CPU_SUSPEND: @@ -341,11 +341,36 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) case ARM_SMCCC_VERSION_FUNC_ID: val = 0; break; + case PSCI_1_1_FN_SYSTEM_RESET2: + case PSCI_1_1_FN64_SYSTEM_RESET2: + if (minor >= 1) { + val = 0; + break; + } + fallthrough; default: val = PSCI_RET_NOT_SUPPORTED; break; } break; + case PSCI_1_1_FN_SYSTEM_RESET2: + kvm_psci_narrow_to_32bit(vcpu); + fallthrough; + case PSCI_1_1_FN64_SYSTEM_RESET2: + if (minor >= 1) { + arg = smccc_get_arg1(vcpu); + + if (arg > PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET && + arg < PSCI_1_1_RESET_TYPE_VENDOR_START) { + val = PSCI_RET_INVALID_PARAMS; + } else { + kvm_psci_system_reset(vcpu); + val = PSCI_RET_INTERNAL_FAILURE; + ret = 0; + } + break; + }; + fallthrough; default: return kvm_psci_0_2_call(vcpu); } diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index 2fcad1dd0b0e..2bf93c0d6354 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h @@ -82,6 +82,10 @@ #define PSCI_0_2_TOS_UP_NO_MIGRATE 1 #define PSCI_0_2_TOS_MP 2 +/* PSCI v1.1 reset type encoding for SYSTEM_RESET2 */ +#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0 +#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U + /* PSCI version decoding (independent of PSCI version) */ #define PSCI_VERSION_MAJOR_SHIFT 16 #define PSCI_VERSION_MINOR_MASK \ From 34739fd95fab3a5efb0422e4f012b685e33598dc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 21 Feb 2022 15:35:24 +0000 Subject: [PATCH 32/39] KVM: arm64: Indicate SYSTEM_RESET2 in kvm_run::system_event flags field When handling reset and power-off PSCI calls from the guest, we initialise X0 to PSCI_RET_INTERNAL_FAILURE in case the VMM tries to re-run the vCPU after issuing the call. Unfortunately, this also means that the VMM cannot see which PSCI call was issued and therefore cannot distinguish between PSCI SYSTEM_RESET and SYSTEM_RESET2 calls, which is necessary in order to determine the validity of the "reset_type" in X1. Allocate bit 0 of the previously unused 'flags' field of the system_event structure so that we can indicate the PSCI call used to initiate the reset. Cc: Marc Zyngier Cc: James Morse Cc: Alexandru Elisei Cc: Suzuki K Poulose Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220221153524.15397-4-will@kernel.org --- Documentation/virt/kvm/api.rst | 5 +++++ arch/arm64/include/uapi/asm/kvm.h | 7 +++++++ arch/arm64/kvm/psci.c | 15 +++++++++++---- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a4267104db50..8305ac9747b6 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -5939,6 +5939,11 @@ Valid values for 'type' are: to ignore the request, or to gather VM memory core dump and/or reset/shutdown of the VM. +Valid flags are: + + - KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (arm64 only) -- the guest issued + a SYSTEM_RESET2 call according to v1.1 of the PSCI specification. 
+ :: /* KVM_EXIT_IOAPIC_EOI */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index b3edde68bc3e..06bc08fdf8ea 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -413,6 +413,13 @@ struct kvm_arm_copy_mte_tags { #define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS #define KVM_PSCI_RET_DENIED PSCI_RET_DENIED +/* arm64-specific kvm_run::system_event flags */ +/* + * Reset caused by a PSCI v1.1 SYSTEM_RESET2 call. + * Valid only when the system event has a type of KVM_SYSTEM_EVENT_RESET. + */ +#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (1ULL << 0) + #endif #endif /* __ARM_KVM_H__ */ diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 30fcc5a99483..ecb4b74cb12a 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -162,7 +162,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) return PSCI_0_2_AFFINITY_LEVEL_OFF; } -static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) +static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags) { unsigned long i; struct kvm_vcpu *tmp; @@ -182,17 +182,24 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); vcpu->run->system_event.type = type; + vcpu->run->system_event.flags = flags; vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; } static void kvm_psci_system_off(struct kvm_vcpu *vcpu) { - kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN); + kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN, 0); } static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) { - kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); + kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET, 0); +} + +static void kvm_psci_system_reset2(struct kvm_vcpu *vcpu) +{ + kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET, + KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2); } static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu) @@ -364,7 +371,7 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) arg < PSCI_1_1_RESET_TYPE_VENDOR_START) { val = PSCI_RET_INVALID_PARAMS; } else { - kvm_psci_system_reset(vcpu); + kvm_psci_system_reset2(vcpu); val = PSCI_RET_INTERNAL_FAILURE; ret = 0; } From ae82047e97a3014d843d4fb931922982ef625e54 Mon Sep 17 00:00:00 2001 From: Changcheng Deng Date: Wed, 23 Feb 2022 09:27:50 +0000 Subject: [PATCH 33/39] KVM: arm64: Remove unneeded semicolons Fix the following coccicheck review: ./arch/arm64/kvm/psci.c: 379: 3-4: Unneeded semicolon Reported-by: Zeal Robot Signed-off-by: Changcheng Deng [maz: squashed another instance of the same issue in the patch] Signed-off-by: Marc Zyngier Acked-by: Will Deacon Link: https://lore.kernel.org/r/20220223092750.1934130-1-deng.changcheng@zte.com.cn Link: https://lore.kernel.org/r/20220225122922.GA19390@willie-the-truck --- arch/arm64/kvm/psci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index ecb4b74cb12a..0d48d1e7291d 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -376,7 +376,7 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) ret = 0; } break; - }; + } fallthrough; default: return kvm_psci_0_2_call(vcpu); @@ -438,7 +438,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) return kvm_psci_0_1_call(vcpu); default: return -EINVAL; - }; + } } int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) From 4c11113c1a3d10f5b617e5d2b9acd8d1d715450f Mon Sep 17 00:00:00 
2001 From: Mark Brown Date: Fri, 25 Feb 2022 18:46:58 +0000 Subject: [PATCH 34/39] KVM: arm64: Enable Cortex-A510 erratum 2077057 by default The recently added configuration option for Cortex A510 erratum 2077057 does not have a "default y" unlike other errata fixes. This appears to simply be an oversight since the help text suggests enabling the option if unsure and there's nothing in the commit log to suggest it is intentional. Fixes: 1dd498e5e26ad ("KVM: arm64: Workaround Cortex-A510's single-step and PAC trap errata") Signed-off-by: Mark Brown Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220225184658.172527-1-broonie@kernel.org --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index cbcd42decb2a..ae706cab97a5 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -682,6 +682,7 @@ config ARM64_ERRATUM_2051678 config ARM64_ERRATUM_2077057 bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2" + default y help This option adds the workaround for ARM Cortex-A510 erratum 2077057. Affected Cortex-A510 may corrupt SPSR_EL2 when the a step exception is From f7659f8bcdf86b119d945f3335f20eda219a23ff Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 4 Mar 2022 12:04:49 +0000 Subject: [PATCH 35/39] KVM: arm64: Only open the interrupt window on exit due to an interrupt Now that we properly account for interrupts taken whilst the guest was running, it becomes obvious that there is no need to open this accounting window if we didn't exit because of an interrupt. This saves a number of system register accesses and other barriers if we exited for any other reason (such as a trap, for example). Signed-off-by: Marc Zyngier Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20220304135914.1464721-1-maz@kernel.org --- arch/arm64/kvm/arm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ecc5958e27fe..5254a97929f7 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -945,9 +945,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * context synchronization event) is necessary to ensure that * pending interrupts are taken. */ - local_irq_enable(); - isb(); - local_irq_disable(); + if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) { + local_irq_enable(); + isb(); + local_irq_disable(); + } guest_timing_exit_irqoff(); From 3fbf4207dc6807bf98e3d32558753cfa5eac2fa1 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 8 Mar 2022 17:28:57 +0000 Subject: [PATCH 36/39] Documentation: KVM: Update documentation to indicate KVM is arm64-only KVM support for 32-bit ARM hosts (KVM/arm) has been removed from the kernel since commit 541ad0150ca4 ("arm: Remove 32bit KVM host support"). There still exists some remnants of the old architecture in the KVM documentation. Remove all traces of 32-bit host support from the documentation. Note that AArch32 guests are still supported. 
Suggested-by: Marc Zyngier Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220308172856.2997250-1-oupton@google.com --- Documentation/virt/kvm/api.rst | 87 ++++++++++++------------- Documentation/virt/kvm/devices/vcpu.rst | 2 +- 2 files changed, 44 insertions(+), 45 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a4267104db50..cfbb63873963 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -417,7 +417,7 @@ kvm_run' (see below). ----------------- :Capability: basic -:Architectures: all except ARM, arm64 +:Architectures: all except arm64 :Type: vcpu ioctl :Parameters: struct kvm_regs (out) :Returns: 0 on success, -1 on error @@ -450,7 +450,7 @@ Reads the general purpose registers from the vcpu. ----------------- :Capability: basic -:Architectures: all except ARM, arm64 +:Architectures: all except arm64 :Type: vcpu ioctl :Parameters: struct kvm_regs (in) :Returns: 0 on success, -1 on error @@ -824,7 +824,7 @@ Writes the floating point state to the vcpu. ----------------------- :Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390) -:Architectures: x86, ARM, arm64, s390 +:Architectures: x86, arm64, s390 :Type: vm ioctl :Parameters: none :Returns: 0 on success, -1 on error @@ -833,7 +833,7 @@ Creates an interrupt controller model in the kernel. On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23 only go to the IOAPIC. -On ARM/arm64, a GICv2 is created. Any other GIC versions require the usage of +On arm64, a GICv2 is created. Any other GIC versions require the usage of KVM_CREATE_DEVICE, which also supports creating a GICv2. Using KVM_CREATE_DEVICE is preferred over KVM_CREATE_IRQCHIP for GICv2. On s390, a dummy irq routing table is created. @@ -846,7 +846,7 @@ before KVM_CREATE_IRQCHIP can be used. ----------------- :Capability: KVM_CAP_IRQCHIP -:Architectures: x86, arm, arm64 +:Architectures: x86, arm64 :Type: vm ioctl :Parameters: struct kvm_irq_level :Returns: 0 on success, -1 on error @@ -870,7 +870,7 @@ capability is present (or unless it is not using the in-kernel irqchip, of course). -ARM/arm64 can signal an interrupt either at the CPU level, or at the +arm64 can signal an interrupt either at the CPU level, or at the in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for specific cpus. The irq field is interpreted like this:: @@ -896,7 +896,7 @@ When KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 is supported, the target vcpu is identified as (256 * vcpu2_index + vcpu_index). Otherwise, vcpu2_index must be zero. -Note that on arm/arm64, the KVM_CAP_IRQCHIP capability only conditions +Note that on arm64, the KVM_CAP_IRQCHIP capability only conditions injection of interrupts for the in-kernel irqchip. KVM_IRQ_LINE can always be used for a userspace interrupt controller. @@ -1087,7 +1087,7 @@ Other flags returned by ``KVM_GET_CLOCK`` are accepted but ignored. :Capability: KVM_CAP_VCPU_EVENTS :Extended by: KVM_CAP_INTR_SHADOW -:Architectures: x86, arm, arm64 +:Architectures: x86, arm64 :Type: vcpu ioctl :Parameters: struct kvm_vcpu_event (out) :Returns: 0 on success, -1 on error @@ -1146,8 +1146,8 @@ The following bits are defined in the flags field: fields contain a valid state. This bit will be set whenever KVM_CAP_EXCEPTION_PAYLOAD is enabled. 
-ARM/ARM64: -^^^^^^^^^^ +ARM64: +^^^^^^ If the guest accesses a device that is being emulated by the host kernel in such a way that a real device would generate a physical SError, KVM may make @@ -1206,7 +1206,7 @@ directly to the virtual CPU). :Capability: KVM_CAP_VCPU_EVENTS :Extended by: KVM_CAP_INTR_SHADOW -:Architectures: x86, arm, arm64 +:Architectures: x86, arm64 :Type: vcpu ioctl :Parameters: struct kvm_vcpu_event (in) :Returns: 0 on success, -1 on error @@ -1241,8 +1241,8 @@ can be set in the flags field to signal that the exception_has_payload, exception_payload, and exception.pending fields contain a valid state and shall be written into the VCPU. -ARM/ARM64: -^^^^^^^^^^ +ARM64: +^^^^^^ User space may need to inject several types of events to the guest. @@ -1449,7 +1449,7 @@ for vm-wide capabilities. --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm, arm64, riscv +:Architectures: x86, s390, arm64, riscv :Type: vcpu ioctl :Parameters: struct kvm_mp_state (out) :Returns: 0 on success; -1 on error @@ -1467,7 +1467,7 @@ Possible values are: ========================== =============================================== KVM_MP_STATE_RUNNABLE the vcpu is currently running - [x86,arm/arm64,riscv] + [x86,arm64,riscv] KVM_MP_STATE_UNINITIALIZED the vcpu is an application processor (AP) which has not yet received an INIT signal [x86] KVM_MP_STATE_INIT_RECEIVED the vcpu has received an INIT signal, and is @@ -1476,7 +1476,7 @@ Possible values are: is waiting for an interrupt [x86] KVM_MP_STATE_SIPI_RECEIVED the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) [x86] - KVM_MP_STATE_STOPPED the vcpu is stopped [s390,arm/arm64,riscv] + KVM_MP_STATE_STOPPED the vcpu is stopped [s390,arm64,riscv] KVM_MP_STATE_CHECK_STOP the vcpu is in a special error state [s390] KVM_MP_STATE_OPERATING the vcpu is operating (running or halted) [s390] @@ -1488,8 +1488,8 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel irqchip, the multiprocessing state must be maintained by userspace on these architectures. -For arm/arm64/riscv: -^^^^^^^^^^^^^^^^^^^^ +For arm64/riscv: +^^^^^^^^^^^^^^^^ The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. @@ -1498,7 +1498,7 @@ KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm, arm64, riscv +:Architectures: x86, s390, arm64, riscv :Type: vcpu ioctl :Parameters: struct kvm_mp_state (in) :Returns: 0 on success; -1 on error @@ -1510,8 +1510,8 @@ On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel irqchip, the multiprocessing state must be maintained by userspace on these architectures. -For arm/arm64/riscv: -^^^^^^^^^^^^^^^^^^^^ +For arm64/riscv: +^^^^^^^^^^^^^^^^ The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. @@ -1780,14 +1780,14 @@ The flags bitmap is defined as:: ------------------------ :Capability: KVM_CAP_IRQ_ROUTING -:Architectures: x86 s390 arm arm64 +:Architectures: x86 s390 arm64 :Type: vm ioctl :Parameters: struct kvm_irq_routing (in) :Returns: 0 on success, -1 on error Sets the GSI routing table entries, overwriting any previously set entries. 
-On arm/arm64, GSI routing has the following limitation: +On arm64, GSI routing has the following limitation: - GSI routing does not apply to KVM_IRQ_LINE but only to KVM_IRQFD. @@ -2855,7 +2855,7 @@ after pausing the vcpu, but before it is resumed. ------------------- :Capability: KVM_CAP_SIGNAL_MSI -:Architectures: x86 arm arm64 +:Architectures: x86 arm64 :Type: vm ioctl :Parameters: struct kvm_msi (in) :Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error @@ -3043,7 +3043,7 @@ into the hash PTE second double word). -------------- :Capability: KVM_CAP_IRQFD -:Architectures: x86 s390 arm arm64 +:Architectures: x86 s390 arm64 :Type: vm ioctl :Parameters: struct kvm_irqfd (in) :Returns: 0 on success, -1 on error @@ -3069,7 +3069,7 @@ Note that closing the resamplefd is not sufficient to disable the irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment and need not be specified with KVM_IRQFD_FLAG_DEASSIGN. -On arm/arm64, gsi routing being supported, the following can happen: +On arm64, gsi routing being supported, the following can happen: - in case no routing entry is associated to this gsi, injection fails - in case the gsi is associated to an irqchip routing entry, @@ -3325,7 +3325,7 @@ current state. "addr" is ignored. ---------------------- :Capability: basic -:Architectures: arm, arm64 +:Architectures: arm64 :Type: vcpu ioctl :Parameters: struct kvm_vcpu_init (in) :Returns: 0 on success; -1 on error @@ -3423,7 +3423,7 @@ Possible features: ----------------------------- :Capability: basic -:Architectures: arm, arm64 +:Architectures: arm64 :Type: vm ioctl :Parameters: struct kvm_vcpu_init (out) :Returns: 0 on success; -1 on error @@ -3452,7 +3452,7 @@ VCPU matching underlying host. --------------------- :Capability: basic -:Architectures: arm, arm64, mips +:Architectures: arm64, mips :Type: vcpu ioctl :Parameters: struct kvm_reg_list (in/out) :Returns: 0 on success; -1 on error @@ -3479,7 +3479,7 @@ KVM_GET_ONE_REG/KVM_SET_ONE_REG calls. ----------------------------------------- :Capability: KVM_CAP_ARM_SET_DEVICE_ADDR -:Architectures: arm, arm64 +:Architectures: arm64 :Type: vm ioctl :Parameters: struct kvm_arm_device_address (in) :Returns: 0 on success, -1 on error @@ -3506,13 +3506,13 @@ can access emulated or directly exposed devices, which the host kernel needs to know about. The id field is an architecture specific identifier for a specific device. -ARM/arm64 divides the id field into two parts, a device id and an +arm64 divides the id field into two parts, a device id and an address type id specific to the individual device:: bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 | field: | 0x00000000 | device id | addr type id | -ARM/arm64 currently only require this when using the in-kernel GIC +arm64 currently only require this when using the in-kernel GIC support for the hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2 as the device id. When setting the base address for the guest's mapping of the VGIC virtual CPU and distributor interface, the ioctl @@ -4726,7 +4726,7 @@ to I/O ports. 
------------------------------------ :Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 -:Architectures: x86, arm, arm64, mips +:Architectures: x86, arm64, mips :Type: vm ioctl :Parameters: struct kvm_clear_dirty_log (in) :Returns: 0 on success, -1 on error @@ -4838,7 +4838,7 @@ version has the following quirks: 4.119 KVM_ARM_VCPU_FINALIZE --------------------------- -:Architectures: arm, arm64 +:Architectures: arm64 :Type: vcpu ioctl :Parameters: int feature (in) :Returns: 0 on success, -1 on error @@ -5920,7 +5920,7 @@ should put the acknowledged interrupt vector into the 'epr' field. If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered a system-level event using some architecture specific mechanism (hypercall -or some special instruction). In case of ARM/ARM64, this is triggered using +or some special instruction). In case of ARM64, this is triggered using HVC instruction based PSCI call from the vcpu. The 'type' field describes the system-level event type. The 'flags' field describes architecture specific flags for the system-level event. @@ -6013,7 +6013,7 @@ in send_page or recv a buffer to recv_page). __u64 fault_ipa; } arm_nisv; -Used on arm and arm64 systems. If a guest accesses memory not in a memslot, +Used on arm64 systems. If a guest accesses memory not in a memslot, KVM will typically return to userspace and ask it to do MMIO emulation on its behalf. However, for certain classes of instructions, no instruction decode (direction, length of memory access) is provided, and fetching and decoding @@ -6030,11 +6030,10 @@ did not fall within an I/O window. Userspace implementations can query for KVM_CAP_ARM_NISV_TO_USER, and enable this capability at VM creation. Once this is done, these types of errors will instead return to userspace with KVM_EXIT_ARM_NISV, with the valid bits from -the HSR (arm) and ESR_EL2 (arm64) in the esr_iss field, and the faulting IPA -in the fault_ipa field. Userspace can either fix up the access if it's -actually an I/O access by decoding the instruction from guest memory (if it's -very brave) and continue executing the guest, or it can decide to suspend, -dump, or restart the guest. +the ESR_EL2 in the esr_iss field, and the faulting IPA in the fault_ipa field. +Userspace can either fix up the access if it's actually an I/O access by +decoding the instruction from guest memory (if it's very brave) and continue +executing the guest, or it can decide to suspend, dump, or restart the guest. Note that KVM does not skip the faulting instruction as it does for KVM_EXIT_MMIO, but userspace has to emulate any change to the processing state @@ -6741,7 +6740,7 @@ and injected exceptions. 7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 -:Architectures: x86, arm, arm64, mips +:Architectures: x86, arm64, mips :Parameters: args[0] whether feature should be enabled or not Valid flags are:: @@ -7124,7 +7123,7 @@ reserved. 8.9 KVM_CAP_ARM_USER_IRQ ------------------------ -:Architectures: arm, arm64 +:Architectures: arm64 This capability, if KVM_CHECK_EXTENSION indicates that it is available, means that if userspace creates a VM without an in-kernel interrupt controller, it @@ -7251,7 +7250,7 @@ HvFlushVirtualAddressList, HvFlushVirtualAddressListEx. 
8.19 KVM_CAP_ARM_INJECT_SERROR_ESR ---------------------------------- -:Architectures: arm, arm64 +:Architectures: arm64 This capability indicates that userspace can specify (via the KVM_SET_VCPU_EVENTS ioctl) the syndrome value reported to the guest when it diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst index 60a29972d3f1..92942440a9e7 100644 --- a/Documentation/virt/kvm/devices/vcpu.rst +++ b/Documentation/virt/kvm/devices/vcpu.rst @@ -108,7 +108,7 @@ using event 0x11 (CPU_CYCLES). 2. GROUP: KVM_ARM_VCPU_TIMER_CTRL ================================= -:Architectures: ARM, ARM64 +:Architectures: ARM64 2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_VTIMER, KVM_ARM_VCPU_TIMER_IRQ_PTIMER ----------------------------------------------------------------------------- From 9d3e7b7c82fd9d40240867ef4c45388cd05031f3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 9 Mar 2022 18:13:08 +0000 Subject: [PATCH 37/39] KVM: arm64: Really propagate PSCI SYSTEM_RESET2 arguments to userspace Commit d43583b890e7 ("KVM: arm64: Expose PSCI SYSTEM_RESET2 call to the guest") hooked up the SYSTEM_RESET2 PSCI call for guests but failed to preserve its arguments for userspace, instead overwriting them with zeroes via smccc_set_retval(). As Linux only passes zeroes for these arguments, this appeared to be working for Linux guests. Oh well. Don't call smccc_set_retval() for a SYSTEM_RESET2 heading to userspace and instead set X0 (and only X0) explicitly to PSCI_RET_INTERNAL_FAILURE just in case the vCPU re-enters the guest. Fixes: d43583b890e7 ("KVM: arm64: Expose PSCI SYSTEM_RESET2 call to the guest") Reported-by: Andrew Walbran Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220309181308.982-1-will@kernel.org --- arch/arm64/kvm/psci.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 0d48d1e7291d..3e007d6982af 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -367,14 +367,14 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) if (minor >= 1) { arg = smccc_get_arg1(vcpu); - if (arg > PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET && - arg < PSCI_1_1_RESET_TYPE_VENDOR_START) { - val = PSCI_RET_INVALID_PARAMS; - } else { + if (arg <= PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET || + arg >= PSCI_1_1_RESET_TYPE_VENDOR_START) { kvm_psci_system_reset2(vcpu); - val = PSCI_RET_INTERNAL_FAILURE; - ret = 0; + vcpu_set_reg(vcpu, 0, PSCI_RET_INTERNAL_FAILURE); + return 0; } + + val = PSCI_RET_INVALID_PARAMS; break; } fallthrough; From 06394531b425794dc56f3d525b7994d25b8072f7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 11 Mar 2022 17:39:47 +0000 Subject: [PATCH 38/39] KVM: arm64: Generalise VM features into a set of flags We currently deal with a set of booleans for VM features, while they could be better represented as set of flags contained in an unsigned long, similarily to what we are doing on the CPU side. 
Signed-off-by: Marc Zyngier [Oliver: Flag-ify the 'ran_once' boolean] Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220311174001.605719-2-oupton@google.com --- arch/arm64/include/asm/kvm_host.h | 15 +++++++++------ arch/arm64/kvm/arm.c | 7 ++++--- arch/arm64/kvm/mmio.c | 3 ++- arch/arm64/kvm/pmu-emul.c | 4 ++-- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 76f795b628f1..0e96087885fe 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -122,7 +122,12 @@ struct kvm_arch { * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is * supported. */ - bool return_nisv_io_abort_to_user; +#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0 + /* Memory Tagging Extension enabled for the guest */ +#define KVM_ARCH_FLAG_MTE_ENABLED 1 + /* At least one vCPU has ran in the VM */ +#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2 + unsigned long flags; /* * VM-wide PMU filter, implemented as a bitmap and big enough for @@ -135,10 +140,6 @@ struct kvm_arch { u8 pfr0_csv2; u8 pfr0_csv3; - - /* Memory Tagging Extension enabled for the guest */ - bool mte_enabled; - bool ran_once; }; struct kvm_vcpu_fault_info { @@ -810,7 +811,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); #define kvm_arm_vcpu_sve_finalized(vcpu) \ ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) -#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled) +#define kvm_has_mte(kvm) \ + (system_supports_mte() && \ + test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags)) #define kvm_vcpu_has_pmu(vcpu) \ (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index f49ebdd9c990..17021bc8ee2c 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -84,7 +84,8 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, switch (cap->cap) { case KVM_CAP_ARM_NISV_TO_USER: r = 0; - kvm->arch.return_nisv_io_abort_to_user = true; + set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER, + &kvm->arch.flags); break; case KVM_CAP_ARM_MTE: mutex_lock(&kvm->lock); @@ -92,7 +93,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, r = -EINVAL; } else { r = 0; - kvm->arch.mte_enabled = true; + set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); } mutex_unlock(&kvm->lock); break; @@ -559,7 +560,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); mutex_lock(&kvm->lock); - kvm->arch.ran_once = true; + set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); mutex_unlock(&kvm->lock); return ret; diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index 3e2d8ba11a02..3dd38a151d2a 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -135,7 +135,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) * volunteered to do so, and bail out otherwise. 
*/ if (!kvm_vcpu_dabt_isvalid(vcpu)) { - if (vcpu->kvm->arch.return_nisv_io_abort_to_user) { + if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER, + &vcpu->kvm->arch.flags)) { run->exit_reason = KVM_EXIT_ARM_NISV; run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu); run->arm_nisv.fault_ipa = fault_ipa; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 4526a5824dac..78fdc443adc7 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -961,7 +961,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) list_for_each_entry(entry, &arm_pmus, entry) { arm_pmu = entry->arm_pmu; if (arm_pmu->pmu.type == pmu_id) { - if (kvm->arch.ran_once || + if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) || (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) { ret = -EBUSY; break; } @@ -1044,7 +1044,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) mutex_lock(&kvm->lock); - if (kvm->arch.ran_once) { + if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) { mutex_unlock(&kvm->lock); return -EBUSY; } From 21ea457842759a236eefed2cfaa8cc7e5dc967a0 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 18 Mar 2022 11:37:19 +0100 Subject: [PATCH 39/39] KVM: arm64: fix typos in comments Various spelling mistakes in comments. Detected with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220318103729.157574-24-Julia.Lawall@inria.fr --- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/guest.c | 2 +- arch/arm64/kvm/handle_exit.c | 2 +- arch/arm64/kvm/hyp/nvhe/page_alloc.c | 4 ++-- arch/arm64/kvm/mmu.c | 2 +- arch/arm64/kvm/vgic/vgic.c | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 17021bc8ee2c..ba9165e84396 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1690,7 +1690,7 @@ static void init_cpu_logical_map(void) /* * Copy the MPIDR <-> logical CPU ID mapping to hyp. - * Only copy the set of online CPUs whose features have been chacked + * Only copy the set of online CPUs whose features have been checked * against the finalized system capabilities. The hypervisor will not * allow any other CPUs from the `possible` set to boot. */ diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index e116c7767730..7e15b03fbdf8 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -282,7 +282,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) break; /* - * Otherwide, this is a priviledged mode, and *all* the + * Otherwise, this is a privileged mode, and *all* the * registers must be narrowed to 32bit. */ default: diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e3140abd2e2e..97fe14aab1a3 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -248,7 +248,7 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index) case ARM_EXCEPTION_HYP_GONE: /* * EL2 has been reset to the hyp-stub. This happens when a guest - * is pre-emptied by kvm_reboot()'s shutdown call. + * is pre-empted by kvm_reboot()'s shutdown call. 
*/ run->exit_reason = KVM_EXIT_FAIL_ENTRY; return 0; diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c index 543cad6c376a..d40f0b30b534 100644 --- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c @@ -102,7 +102,7 @@ static void __hyp_attach_page(struct hyp_pool *pool, * Only the first struct hyp_page of a high-order page (otherwise known * as the 'head') should have p->order set. The non-head pages should * have p->order = HYP_NO_ORDER. Here @p may no longer be the head - * after coallescing, so make sure to mark it HYP_NO_ORDER proactively. + * after coalescing, so make sure to mark it HYP_NO_ORDER proactively. */ p->order = HYP_NO_ORDER; for (; (order + 1) < pool->max_order; order++) { @@ -110,7 +110,7 @@ static void __hyp_attach_page(struct hyp_pool *pool, if (!buddy) break; - /* Take the buddy out of its list, and coallesce with @p */ + /* Take the buddy out of its list, and coalesce with @p */ page_remove_from_list(buddy); buddy->order = HYP_NO_ORDER; p = min(p, buddy); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 1623abc56af2..0d19259454d8 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -615,7 +615,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { }; /** - * kvm_init_stage2_mmu - Initialise a S2 MMU strucrure + * kvm_init_stage2_mmu - Initialise a S2 MMU structure * @kvm: The pointer to the KVM structure * @mmu: The pointer to the s2 MMU structure * diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 9b98876a8a93..d97e6080b421 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -37,7 +37,7 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { * If you need to take multiple locks, always take the upper lock first, * then the lower ones, e.g. first take the its_lock, then the irq_lock. * If you are already holding a lock and need to take a higher one, you - * have to drop the lower ranking lock first and re-aquire it after having + * have to drop the lower ranking lock first and re-acquire it after having * taken the upper one. * * When taking more than one ap_list_lock at the same time, always take the
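A short aside on the flags conversion in patch 38 above: the per-VM booleans (return_nisv_io_abort_to_user, mte_enabled, ran_once) become bit positions in a single unsigned long, set with set_bit() and queried with test_bit(), mirroring what the commit message calls the CPU-side handling. The stand-alone C sketch below illustrates the same pattern outside the kernel; it is illustrative only, the struct and flag names are hypothetical stand-ins, and plain non-atomic bit operations replace the kernel's atomic set_bit()/test_bit() helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flag positions, in the style of KVM_ARCH_FLAG_*. */
    #define VM_FLAG_NISV_TO_USER  0
    #define VM_FLAG_MTE_ENABLED   1
    #define VM_FLAG_HAS_RAN_ONCE  2

    /* Simplified stand-in for struct kvm_arch: one word replaces three bools. */
    struct vm_arch {
        unsigned long flags;
    };

    /* Non-atomic helpers; the kernel's set_bit()/test_bit() are atomic. */
    static void vm_set_flag(struct vm_arch *arch, int nr)
    {
        arch->flags |= 1UL << nr;
    }

    static bool vm_test_flag(const struct vm_arch *arch, int nr)
    {
        return (arch->flags >> nr) & 1UL;
    }

    int main(void)
    {
        struct vm_arch arch = { .flags = 0 };

        /* Userspace enabled the NISV-to-user behaviour for this VM. */
        vm_set_flag(&arch, VM_FLAG_NISV_TO_USER);

        /* First vCPU run: latch the has-run-once state. */
        vm_set_flag(&arch, VM_FLAG_HAS_RAN_ONCE);

        printf("nisv=%d mte=%d ran_once=%d\n",
               vm_test_flag(&arch, VM_FLAG_NISV_TO_USER),
               vm_test_flag(&arch, VM_FLAG_MTE_ENABLED),
               vm_test_flag(&arch, VM_FLAG_HAS_RAN_ONCE));
        return 0;
    }

Packing the features into one word keeps struct kvm_arch from growing a field per feature and makes a later VM-wide flag a one-line #define plus a set_bit()/test_bit() call, which is the consistency argument the commit message makes with the existing per-vCPU flags word.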