Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-11-27 03:55:37 +08:00
Merge branch 'loongarch-kvm' into loongarch-next
This commit is contained in: commit 18b722527e
arch/loongarch/kvm/switch.S
@@ -214,12 +214,6 @@ SYM_FUNC_START(kvm_enter_guest)
 	/* Save host GPRs */
 	kvm_save_host_gpr a2

-	/* Save host CRMD, PRMD to stack */
-	csrrd	a3, LOONGARCH_CSR_CRMD
-	st.d	a3, a2, PT_CRMD
-	csrrd	a3, LOONGARCH_CSR_PRMD
-	st.d	a3, a2, PT_PRMD
-
 	addi.d	a2, a1, KVM_VCPU_ARCH
 	st.d	sp, a2, KVM_ARCH_HSP
 	st.d	tp, a2, KVM_ARCH_HTP
arch/loongarch/kvm/timer.c
@@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
 	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
 }

-/*
- * Push timer forward on timeout.
- * Handle an hrtimer event by push the hrtimer forward a period.
- */
-static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
-{
-	unsigned long cfg, period;
-
-	/* Add periodic tick to current expire time */
-	cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
-	if (cfg & CSR_TCFG_PERIOD) {
-		period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
-		hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
-		return HRTIMER_RESTART;
-	} else
-		return HRTIMER_NORESTART;
-}
-
 /* Low level hrtimer wake routine */
 enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
 {
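For context on the deleted helper: kvm_count_timeout() handled the periodic case by pushing the hrtimer's expiry forward one period whenever the guest's TCFG has the PERIOD bit set, and otherwise let the timer stop. A minimal userspace model of that decision is sketched below; the struct, field names and the MHz-frequency assumption are illustrative, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model only: names and the MHz assumption are not the kernel's. */
struct model_timer {
	bool periodic;          /* the PERIOD bit of guest CSR.TCFG */
	uint64_t period_ticks;  /* programmed initial count */
	uint64_t expire_ns;     /* absolute expiry of the software timer */
	uint32_t timer_mhz;     /* constant-timer frequency in MHz */
};

/* ticks -> ns for a counter running at timer_mhz MHz (one tick = 1000/timer_mhz ns) */
static uint64_t ticks_to_ns(const struct model_timer *t, uint64_t ticks)
{
	return ticks * 1000ULL / t->timer_mhz;
}

/*
 * What the removed kvm_count_timeout() decided on expiry: a periodic timer is
 * pushed forward one period and kept running, a one-shot timer is not restarted.
 */
static bool timer_expired(struct model_timer *t)
{
	if (t->periodic) {
		t->expire_ns += ticks_to_ns(t, t->period_ticks);
		return true;    /* ~ HRTIMER_RESTART */
	}
	return false;           /* ~ HRTIMER_NORESTART */
}

int main(void)
{
	struct model_timer t = { .periodic = true, .period_ticks = 100000, .timer_mhz = 100 };

	timer_expired(&t);
	timer_expired(&t);
	printf("deadline pushed to %llu ns after two periods\n", (unsigned long long)t.expire_ns);
	return 0;
}

After this branch, that re-arm decision no longer happens in hrtimer context at all: the wakeup handler in the next hunk only queues the interrupt, wakes the vCPU and returns HRTIMER_NORESTART.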
@@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
 	kvm_queue_irq(vcpu, INT_TI);
 	rcuwait_wake_up(&vcpu->wait);

-	return kvm_count_timeout(vcpu);
+	return HRTIMER_NORESTART;
 }

 /*
@@ -93,7 +75,8 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
 	/*
 	 * Freeze the soft-timer and sync the guest stable timer with it.
 	 */
-	hrtimer_cancel(&vcpu->arch.swtimer);
+	if (kvm_vcpu_is_blocking(vcpu))
+		hrtimer_cancel(&vcpu->arch.swtimer);

 	/*
 	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
@@ -168,26 +151,20 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
 	 * Here judge one-shot timer fired by checking whether TVAL is larger
 	 * than TCFG
 	 */
-	if (ticks < cfg) {
+	if (ticks < cfg)
 		delta = tick_to_ns(vcpu, ticks);
-		expire = ktime_add_ns(ktime_get(), delta);
-		vcpu->arch.expire = expire;
+	else
+		delta = 0;

+	expire = ktime_add_ns(ktime_get(), delta);
+	vcpu->arch.expire = expire;
+	if (kvm_vcpu_is_blocking(vcpu)) {
+
 		/*
 		 * HRTIMER_MODE_PINNED is suggested since vcpu may run in
 		 * the same physical cpu in next time
 		 */
 		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
-	} else if (vcpu->stat.generic.blocking) {
-		/*
-		 * Inject timer interrupt so that halt polling can dectect and exit.
-		 * VCPU is scheduled out already and sleeps in rcuwait queue and
-		 * will not poll pending events again. kvm_queue_irq() is not enough,
-		 * hrtimer swtimer should be used here.
-		 */
-		expire = ktime_add_ns(ktime_get(), 10);
-		vcpu->arch.expire = expire;
-		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
-	}
 	}
 }

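The hunk above reshapes the one-shot bookkeeping: the remaining guest ticks are converted to a delta (zero when the timer already fired, i.e. TVAL is no longer below TCFG), the absolute expiry is recorded unconditionally, and the host-side soft timer is armed only for a blocking vCPU, which is also why kvm_restore_timer above only needs to cancel it in that case. A small userspace model of that flow follows; the struct, field names and the MHz counter frequency are assumptions of the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_model {
	uint64_t tcfg_val;   /* programmed initial count (TCFG value field) */
	uint64_t tval;       /* countdown value read back from hardware */
	uint64_t now_ns;     /* "current time" for the sketch */
	uint64_t expire_ns;  /* absolute expiry recorded for later restore */
	uint32_t timer_mhz;  /* constant-timer frequency in MHz (assumed) */
	bool     blocking;   /* is the vCPU sleeping on the host side? */
	bool     swtimer_armed;
};

static uint64_t ticks_to_ns(const struct vcpu_model *v, uint64_t ticks)
{
	return ticks * 1000ULL / v->timer_mhz;
}

static void save_timer(struct vcpu_model *v)
{
	uint64_t delta;

	/*
	 * TVAL >= TCFG means the one-shot timer already fired (the counter
	 * wrapped), so nothing remains; otherwise TVAL ticks are outstanding.
	 */
	if (v->tval < v->tcfg_val)
		delta = ticks_to_ns(v, v->tval);
	else
		delta = 0;

	v->expire_ns = v->now_ns + delta;

	/* Only a blocking vCPU needs a host software timer to wake it up. */
	v->swtimer_armed = v->blocking;
}

int main(void)
{
	struct vcpu_model v = {
		.tcfg_val = 100000, .tval = 25000, .now_ns = 1000000,
		.timer_mhz = 100, .blocking = true,
	};

	save_timer(&v);
	printf("expire at %llu ns, swtimer %s\n",
	       (unsigned long long)v.expire_ns, v.swtimer_armed ? "armed" : "idle");
	return 0;
}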
arch/loongarch/kvm/vcpu.c
@@ -304,11 +304,18 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
 		return -EINVAL;

 	switch (id) {
-	case 2:
+	case LOONGARCH_CPUCFG0:
+		*v = GENMASK(31, 0);
+		return 0;
+	case LOONGARCH_CPUCFG1:
+		/* CPUCFG1_MSGINT is not supported by KVM */
+		*v = GENMASK(25, 0);
+		return 0;
+	case LOONGARCH_CPUCFG2:
 		/* CPUCFG2 features unconditionally supported by KVM */
 		*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
-		     CPUCFG2_LAM;
+		     CPUCFG2_LSPW | CPUCFG2_LAM;
 		/*
 		 * For the ISA extensions listed below, if one is supported
 		 * by the host, then it is also supported by KVM.
@@ -318,14 +325,26 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
 	if (cpu_has_lasx)
 		*v |= CPUCFG2_LASX;

 	return 0;
+	case LOONGARCH_CPUCFG3:
+		*v = GENMASK(16, 0);
+		return 0;
+	case LOONGARCH_CPUCFG4:
+	case LOONGARCH_CPUCFG5:
+		*v = GENMASK(31, 0);
+		return 0;
+	case LOONGARCH_CPUCFG16:
+		*v = GENMASK(16, 0);
+		return 0;
+	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
+		*v = GENMASK(30, 0);
+		return 0;
 	default:
 		/*
-		 * No restrictions on other valid CPUCFG IDs' values, but
-		 * CPUCFG data is limited to 32 bits as the LoongArch ISA
-		 * manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
+		 * CPUCFG bits should be zero if reserved by HW or not
+		 * supported by KVM.
 		 */
-		*v = U32_MAX;
+		*v = 0;
 		return 0;
 	}
 }
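The switch above is essentially a per-CPUCFG-word table of writable bits, with reserved or unsupported words now reporting an empty mask instead of U32_MAX. A self-contained sketch of that shape is below; the IDs, the local GENMASK stand-in and the direct return of the mask (the kernel returns it through a pointer plus an error code) are conveniences of the sketch, and CPUCFG2 is omitted because the kernel assembles it from host feature bits.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel GENMASK(h, l) macro: bits l..h set. */
#define GENMASK(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

/* Illustrative IDs; the kernel uses the LOONGARCH_CPUCFG* constants. */
enum { CPUCFG0 = 0, CPUCFG1, CPUCFG3 = 3, CPUCFG4, CPUCFG5, CPUCFG16 = 16, CPUCFG17, CPUCFG20 = 20 };

static uint64_t cpucfg_mask(int id)
{
	switch (id) {
	case CPUCFG0:
		return GENMASK(31, 0);
	case CPUCFG1:
		return GENMASK(25, 0);      /* message-interrupt bit excluded */
	case CPUCFG3:
		return GENMASK(16, 0);
	case CPUCFG4:
	case CPUCFG5:
		return GENMASK(31, 0);
	case CPUCFG16:
		return GENMASK(16, 0);
	case CPUCFG17 ... CPUCFG20:         /* case-range extension, as in the kernel source */
		return GENMASK(30, 0);
	default:
		return 0;                   /* reserved or unsupported: no writable bits */
	}
}

int main(void)
{
	printf("CPUCFG1 mask:  0x%llx\n", (unsigned long long)cpucfg_mask(CPUCFG1));
	printf("CPUCFG21 mask: 0x%llx\n", (unsigned long long)cpucfg_mask(21));
	return 0;
}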
@@ -344,7 +363,7 @@ static int kvm_check_cpucfg(int id, u64 val)
 		return -EINVAL;

 	switch (id) {
-	case 2:
+	case LOONGARCH_CPUCFG2:
 		if (!(val & CPUCFG2_LLFTP))
 			/* Guests must have a constant timer */
 			return -EINVAL;
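kvm_check_cpucfg() pairs with the mask table: a value is rejected if it sets bits outside the advertised mask, and CPUCFG2 must additionally keep the constant-timer (LLFTP) bit. A hedged sketch of that rule, with the mask passed in directly and the LLFTP bit position assumed rather than taken from the kernel headers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_CPUCFG2_LLFTP (1ULL << 14)   /* assumed bit for the constant timer */

static int check_cpucfg2(uint64_t val, uint64_t mask)
{
	if (val & ~mask)
		return -EINVAL;            /* bits the mask does not allow */
	if (!(val & MODEL_CPUCFG2_LLFTP))
		return -EINVAL;            /* guests must have a constant timer */
	return 0;
}

int main(void)
{
	uint64_t mask = MODEL_CPUCFG2_LLFTP | 0x3fULL;   /* toy mask for the sketch */

	printf("ok:  %d\n", check_cpucfg2(MODEL_CPUCFG2_LLFTP | 0x1, mask));
	printf("bad: %d\n", check_cpucfg2(0x1, mask));   /* no constant timer */
	return 0;
}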