KVM: arm64: Make active_vmids invalid on vCPU schedule out
Like the ASID allocator, we copy active_vmids into reserved_vmids on a rollover. But not every CPU will necessarily have a vCPU as its current task, so we may end up reserving VMID space unnecessarily. Hence, set active_vmids to an invalid VMID when a vCPU is scheduled out.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211122121844.867-5-shameerali.kolothum.thodi@huawei.com
parent 3248136b36
commit 100b4f092f
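To see why the clear matters, here is a minimal user-space model of the idea before the diff itself. Plain arrays stand in for the kernel's per-CPU active_vmids/reserved_vmids, 0 stands in for the VMID_ACTIVE_INVALID sentinel, and NR_CPUS, the VMID values, and the rollover() loop are illustrative assumptions, not kernel code:

#include <inttypes.h>
#include <stdio.h>

#define NR_CPUS             4	/* illustrative CPU count */
#define VMID_ACTIVE_INVALID 0	/* stand-in for the kernel's sentinel */

static uint64_t active_vmids[NR_CPUS];
static uint64_t reserved_vmids[NR_CPUS];

/* What this patch adds: invalidate the per-CPU slot on vCPU sched out. */
static void vmid_clear_active(int cpu)
{
	active_vmids[cpu] = VMID_ACTIVE_INVALID;
}

/* On rollover, only CPUs still running a vCPU keep their VMID reserved. */
static void rollover(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (active_vmids[cpu] != VMID_ACTIVE_INVALID)
			reserved_vmids[cpu] = active_vmids[cpu];
}

int main(void)
{
	active_vmids[0] = 5;	/* CPU 0 is running a vCPU */
	active_vmids[1] = 7;	/* CPU 1's vCPU schedules out... */
	vmid_clear_active(1);	/* ...so VMID 7 is not pinned */

	rollover();
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d reserved vmid: %" PRIu64 "\n",
		       cpu, reserved_vmids[cpu]);
	return 0;
}

Without the clear on CPU 1, VMID 7 would stay reserved across the rollover even though no vCPU is using it.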
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -694,6 +694,7 @@ extern unsigned int kvm_arm_vmid_bits;
 int kvm_arm_vmid_alloc_init(void);
 void kvm_arm_vmid_alloc_free(void);
 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -417,6 +417,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_timer_vcpu_put(vcpu);
 	kvm_vgic_put(vcpu);
 	kvm_vcpu_pmu_restore_host(vcpu);
+	kvm_arm_vmid_clear_active();
 
 	vcpu->cpu = -1;
 }
--- a/arch/arm64/kvm/vmid.c
+++ b/arch/arm64/kvm/vmid.c
@@ -32,6 +32,13 @@ static DEFINE_PER_CPU(u64, reserved_vmids);
 #define vmid2idx(vmid)		((vmid) & ~VMID_MASK)
 #define idx2vmid(idx)		vmid2idx(idx)
 
+/*
+ * As vmid #0 is always reserved, we will never allocate one
+ * as below and can be treated as invalid. This is used to
+ * set the active_vmids on vCPU schedule out.
+ */
+#define	VMID_ACTIVE_INVALID		VMID_FIRST_VERSION
+
 #define vmid_gen_match(vmid) \
 	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
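The sentinel is safe because its low VMID bits are all zero: vmid2idx() strips the generation bits, mapping VMID_ACTIVE_INVALID onto the always-reserved vmid #0. A quick standalone check of that property; the macros mirror those in vmid.c, while the fixed 16-bit width is an assumption (the kernel determines kvm_arm_vmid_bits at runtime):

#include <assert.h>

#define KVM_ARM_VMID_BITS	16	/* assumed width */
#define VMID_MASK		(~((1UL << KVM_ARM_VMID_BITS) - 1))
#define VMID_FIRST_VERSION	(1UL << KVM_ARM_VMID_BITS)
#define VMID_ACTIVE_INVALID	VMID_FIRST_VERSION
#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)

int main(void)
{
	/* The sentinel indexes slot 0, which is never handed out. */
	assert(vmid2idx(VMID_ACTIVE_INVALID) == 0);
	return 0;
}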
@@ -122,6 +129,12 @@ set_vmid:
 	return vmid;
 }
 
+/* Called from vCPU sched out with preemption disabled */
+void kvm_arm_vmid_clear_active(void)
+{
+	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
+}
+
 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
 	unsigned long flags;
@@ -132,11 +145,17 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 	/*
 	 * Please refer comments in check_and_switch_context() in
 	 * arch/arm64/mm/context.c.
+	 *
+	 * Unlike ASID allocator, we set the active_vmids to
+	 * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
+	 * reserving the VMID space needlessly on rollover.
+	 * Hence explicitly check here for a "!= 0" to
+	 * handle the sync with a concurrent rollover.
 	 */
 	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
-	if (old_active_vmid && vmid_gen_match(vmid) &&
-	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
-				     old_active_vmid, vmid))
+	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
+	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
+					  old_active_vmid, vmid))
 		return;
 
 	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
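For context on the race the new comment describes: on a rollover the allocator walks every CPU and claims its active VMID, and writing 0 into the slot is how it signals a concurrent kvm_arm_vmid_update() to take the slow path (the racing cmpxchg then returns 0 and the fast-path test fails). Below is a sketch of that consumer, modelled on the flush_context() pattern this allocator borrows from the arm64 ASID allocator; it is illustrative, not an excerpt from this commit:

/* Sketch only: modelled on the ASID allocator's flush_context(). */
static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

static void flush_context_sketch(void)
{
	u64 vmid;
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Writing 0 is the signal the racing fast path checks for. */
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/*
		 * A CPU whose vCPU scheduled out holds VMID_ACTIVE_INVALID,
		 * which indexes the always-reserved vmid #0 and so pins no
		 * live VMID; a never-used slot falls back to the previous
		 * reservation.
		 */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		per_cpu(reserved_vmids, cpu) = vmid;
	}
}

This is why the fast path needs both checks: old_active_vmid != 0 covers a rollover that zeroed the slot before the read, and the 0 != cmpxchg return value covers one that raced in between the read and the cmpxchg.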