diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index dbbf43d52623..93f43a3e7886 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -13,10 +13,10 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
+#include
 #include
 
 #define KVM_MAX_VCPUS 1024
@@ -95,10 +95,6 @@ struct kvm_arch {
 	struct kvm_guest_timer timer;
 };
 
-struct kvm_sbi_context {
-	int return_handled;
-};
-
 struct kvm_cpu_trap {
 	unsigned long sepc;
 	unsigned long scause;
@@ -169,6 +165,11 @@ struct kvm_vcpu_arch {
 	/* ISA feature bits (similar to MISA) */
 	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
 
+	/* Vendor, Arch, and Implementation details */
+	unsigned long mvendorid;
+	unsigned long marchid;
+	unsigned long mimpid;
+
 	/* SSCRATCH, STVEC, and SCOUNTEREN of Host */
 	unsigned long host_sscratch;
 	unsigned long host_stvec;
@@ -217,7 +218,7 @@ struct kvm_vcpu_arch {
 	struct kvm_csr_decode csr_decode;
 
 	/* SBI context */
-	struct kvm_sbi_context sbi_context;
+	struct kvm_vcpu_sbi_context sbi_context;
 
 	/* Cache pages needed to program page tables with spinlock held */
 	struct kvm_mmu_memory_cache mmu_page_cache;
@@ -327,7 +328,4 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask);
 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
 
-int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
 #endif /* __RISCV_KVM_HOST_H__ */
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index d4e3e600beef..f79478a85d2d 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -14,6 +14,10 @@
 #define KVM_SBI_VERSION_MAJOR 1
 #define KVM_SBI_VERSION_MINOR 0
 
+struct kvm_vcpu_sbi_context {
+	int return_handled;
+};
+
 struct kvm_vcpu_sbi_extension {
 	unsigned long extid_start;
 	unsigned long extid_end;
@@ -31,7 +35,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 flags);
+int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
 #ifdef CONFIG_RISCV_SBI_V01
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 8985ff234c01..92af6f3f057c 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -49,6 +49,9 @@ struct kvm_sregs {
 struct kvm_riscv_config {
 	unsigned long isa;
 	unsigned long zicbom_block_size;
+	unsigned long mvendorid;
+	unsigned long marchid;
+	unsigned long mimpid;
 };
 
 /* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 775d3322b422..5c87db8fdff2 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -627,16 +627,19 @@ long sbi_get_mvendorid(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
 }
+EXPORT_SYMBOL_GPL(sbi_get_mvendorid);
 
 long sbi_get_marchid(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
 }
+EXPORT_SYMBOL_GPL(sbi_get_marchid);
 
 long sbi_get_mimpid(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
 }
+EXPORT_SYMBOL_GPL(sbi_get_mimpid);
 
 static void sbi_send_cpumask_ipi(const struct cpumask *target)
 {
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index df2d8716851f..58c5489d3031 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -127,3 +127,9 @@ static int __init riscv_kvm_init(void)
 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 }
 module_init(riscv_kvm_init);
+
+static void __exit riscv_kvm_exit(void)
+{
+	kvm_exit();
+}
+module_exit(riscv_kvm_exit);
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 3620ecac2fa1..34b57e0be2ef 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -537,10 +537,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	if (change == KVM_MR_FLAGS_ONLY)
 		goto out;
 
-	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		gstage_unmap_range(kvm, base_gpa, size, false);
-	spin_unlock(&kvm->mmu_lock);
+		kvm_riscv_gstage_iounmap(kvm, base_gpa, size);
 
 out:
 	mmap_read_unlock(current->mm);
@@ -632,7 +630,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 
 	mmap_read_lock(current->mm);
 
-	vma = find_vma_intersection(current->mm, hva, hva + 1);
+	vma = vma_lookup(current->mm, hva);
 	if (unlikely(!vma)) {
 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
 		mmap_read_unlock(current->mm);
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 71ebbc4821f0..7c08567097f0 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include
 
 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	KVM_GENERIC_VCPU_STATS(),
@@ -171,6 +172,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 			set_bit(host_isa, vcpu->arch.isa);
 	}
 
+	/* Setup vendor, arch, and implementation details */
+	vcpu->arch.mvendorid = sbi_get_mvendorid();
+	vcpu->arch.marchid = sbi_get_marchid();
+	vcpu->arch.mimpid = sbi_get_mimpid();
+
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
 
@@ -270,6 +276,15 @@ static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
 			return -EINVAL;
 		reg_val = riscv_cbom_block_size;
 		break;
+	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+		reg_val = vcpu->arch.mvendorid;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(marchid):
+		reg_val = vcpu->arch.marchid;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(mimpid):
+		reg_val = vcpu->arch.mimpid;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -296,12 +311,15 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
 
-	/* This ONE REG interface is only defined for single letter extensions */
-	if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
-		return -EINVAL;
-
 	switch (reg_num) {
 	case KVM_REG_RISCV_CONFIG_REG(isa):
+		/*
+		 * This ONE REG interface is only defined for
+		 * single letter extensions.
+		 */
+		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+			return -EINVAL;
+
 		if (!vcpu->arch.ran_atleast_once) {
 			/* Ignore the enable/disable request for certain extensions */
 			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
@@ -329,6 +347,24 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
 		break;
 	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
 		return -EOPNOTSUPP;
+	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.mvendorid = reg_val;
+		else
+			return -EBUSY;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(marchid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.marchid = reg_val;
+		else
+			return -EBUSY;
+		break;
+	case KVM_REG_RISCV_CONFIG_REG(mimpid):
+		if (!vcpu->arch.ran_atleast_once)
+			vcpu->arch.mimpid = reg_val;
+		else
+			return -EBUSY;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -541,22 +577,26 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
 {
-	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+	case KVM_REG_RISCV_CONFIG:
 		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+	case KVM_REG_RISCV_CORE:
 		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+	case KVM_REG_RISCV_CSR:
 		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+	case KVM_REG_RISCV_TIMER:
 		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+	case KVM_REG_RISCV_FP_F:
 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+	case KVM_REG_RISCV_FP_D:
 		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+	case KVM_REG_RISCV_ISA_EXT:
 		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
+	default:
+		break;
+	}
 
 	return -EINVAL;
 }
@@ -564,22 +604,26 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
 {
-	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
+	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+	case KVM_REG_RISCV_CONFIG:
 		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
+	case KVM_REG_RISCV_CORE:
 		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
+	case KVM_REG_RISCV_CSR:
 		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
+	case KVM_REG_RISCV_TIMER:
 		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+	case KVM_REG_RISCV_FP_F:
 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+	case KVM_REG_RISCV_FP_D:
 		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
-	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
+	case KVM_REG_RISCV_ISA_EXT:
 		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
+	default:
+		break;
+	}
 
 	return -EINVAL;
 }
@@ -984,8 +1028,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 	while (ret > 0) {
 		/* Check conditions before entering the guest */
 		ret = xfer_to_guest_mode_handle_work(vcpu);
-		if (!ret)
-			ret = 1;
+		if (ret)
+			continue;
+		ret = 1;
 
 		kvm_riscv_gstage_vmid_update(vcpu);
diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c
index 48f431091cdb..5d65c634d301 100644
--- a/arch/riscv/kvm/vcpu_sbi_base.c
+++ b/arch/riscv/kvm/vcpu_sbi_base.c
@@ -10,9 +10,7 @@
 #include
 #include
 #include
-#include
 #include
-#include
 #include
 
 static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -21,7 +19,6 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 {
 	int ret = 0;
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	struct sbiret ecall_ret;
 
 	switch (cp->a6) {
 	case SBI_EXT_BASE_GET_SPEC_VERSION:
@@ -50,13 +47,13 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		*out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0;
 		break;
 	case SBI_EXT_BASE_GET_MVENDORID:
+		*out_val = vcpu->arch.mvendorid;
+		break;
 	case SBI_EXT_BASE_GET_MARCHID:
+		*out_val = vcpu->arch.marchid;
+		break;
 	case SBI_EXT_BASE_GET_MIMPID:
-		ecall_ret = sbi_ecall(SBI_EXT_BASE, cp->a6, 0, 0, 0, 0, 0, 0);
-		if (!ecall_ret.error)
-			*out_val = ecall_ret.value;
-		/*TODO: We are unnecessarily converting the error twice */
-		ret = sbi_err_map_linux_errno(ecall_ret.error);
+		*out_val = vcpu->arch.mimpid;
 		break;
 	default:
 		ret = -EOPNOTSUPP;
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
index 239dec0a628a..2e915cafd551 100644
--- a/arch/riscv/kvm/vcpu_sbi_hsm.c
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -9,7 +9,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
index 4c034d8a606a..03a0198389f0 100644
--- a/arch/riscv/kvm/vcpu_sbi_replace.c
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -9,7 +9,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c
index 8a91a14e7139..489f225ee66d 100644
--- a/arch/riscv/kvm/vcpu_sbi_v01.c
+++ b/arch/riscv/kvm/vcpu_sbi_v01.c
@@ -9,7 +9,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
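
Note: the new mvendorid, marchid, and mimpid CONFIG registers added above are exposed through the standard KVM ONE_REG interface, so user space may override the host-derived defaults before the first KVM_RUN; afterwards KVM_SET_ONE_REG returns -EBUSY. The following is only a hypothetical user-space sketch (not part of this series), assuming an RV64 host where these "unsigned long" registers are 64-bit and a vcpu_fd already obtained via KVM_CREATE_VCPU:

#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

/* Hypothetical helper: write one RISC-V CONFIG register of a VCPU. */
static int set_riscv_config_reg(int vcpu_fd, __u64 cfg_index, __u64 val)
{
	struct kvm_one_reg reg = {
		/* CONFIG register IDs are built from the type and the field index */
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_CONFIG | cfg_index,
		.addr = (unsigned long)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/*
 * Example (before the first KVM_RUN):
 *	set_riscv_config_reg(vcpu_fd, KVM_REG_RISCV_CONFIG_REG(mvendorid), 0);
 */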