linux/arch/riscv/kvm/vcpu_fp.c
Atish Patra 9bfd900bee RISC-V: KVM: Improve ISA extension by using a bitmap
Currently, every vcpu stores the ISA extensions in a single unsigned long,
which is not scalable as the number of extensions continues to grow.
Using a bitmap allows the ISA extension support to cover any number of
extensions. The CONFIG one reg interface implementation is modified to
support the bitmap as well, but it is meant only for base extensions;
thus, the first element of the bitmap array is sufficient for that
interface.

In the future, all new multi-letter extensions must use the ISA_EXT
one reg interface, which allows enabling/disabling any extension.
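
As a rough sketch of the data-structure change described above (the field
name and bound are assumed here for illustration, not quoted from the
patch itself):

	/* before: one word, so at most BITS_PER_LONG extensions */
	unsigned long isa;

	/* after: a bitmap that can hold any number of extension IDs */
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);

Helpers such as riscv_isa_extension_available() then take a pointer to
the bitmap (or NULL for the host ISA), which is why the functions below
accept a const unsigned long *isa argument.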

Signed-off-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
2022-07-29 17:14:11 +05:30


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/hwcap.h>

#ifdef CONFIG_FPU
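/*
 * Reset the guest FP state: sstatus.FS becomes Initial when the vcpu's
 * ISA has F or D, and Off otherwise.
 */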
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;
	if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
	    riscv_isa_extension_available(vcpu->arch.isa, d))
		cntx->sstatus |= SR_FS_INITIAL;
	else
		cntx->sstatus |= SR_FS_OFF;
}
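
/* Mark the context's FP state Clean after a save or restore. */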
static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_FS;
	cntx->sstatus |= SR_FS_CLEAN;
}
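
/* Save the guest FP registers, but only if the guest dirtied them. */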
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}
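
/* Restore the guest FP registers unless FP is Off for this context. */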
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
				     const unsigned long *isa)
{
	if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_restore(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_restore(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}
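
/* Save the host FP registers so the guest's can be loaded. */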
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_save(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_save(cntx);
}
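
/* Restore the host FP registers once the guest's have been saved. */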
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, d))
		__kvm_riscv_fp_d_restore(cntx);
	else if (riscv_isa_extension_available(NULL, f))
		__kvm_riscv_fp_f_restore(cntx);
}
#endif
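
/*
 * ONE_REG get: copy one guest FP register (f0-f31 or fcsr) out to
 * userspace; rtype selects the F (32-bit) or D (64-bit) register view.
 */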
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
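
/*
 * ONE_REG set: the mirror image of kvm_riscv_vcpu_get_reg_fp(), copying
 * one FP register value in from userspace with the same validation.
 */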
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(vcpu->arch.isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(vcpu->arch.isa, d)) {
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}