// SPDX-License-Identifier: GPL-2.0
/*
 * VMID allocator.
 *
 * Based on the arm64 ASID allocator algorithm.
 * Please refer to arch/arm64/mm/context.c for detailed
 * comments on the algorithm.
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

unsigned int __ro_after_init kvm_arm_vmid_bits;
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

static atomic64_t vmid_generation;
static unsigned long *vmid_map;

static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

#define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS		VMID_FIRST_VERSION
#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)
#define idx2vmid(idx)		vmid2idx(idx)
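
/*
 * A VMID value packs a rolling "generation" count in the bits above
 * kvm_arm_vmid_bits together with the hardware VMID index in the low
 * kvm_arm_vmid_bits. For example, with 16-bit VMIDs the value 0x2004e
 * is generation 2, hardware VMID 0x4e. The generation count starts at
 * 1 (VMID_FIRST_VERSION), so a zeroed kvm_vmid->id never matches the
 * current generation.
 */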

/*
 * As vmid #0 is always reserved, a value whose index bits are zero
 * (such as VMID_ACTIVE_INVALID below) will never be produced by the
 * allocator and can be treated as invalid. This is used to set the
 * active_vmids on vCPU schedule out.
 */
#define VMID_ACTIVE_INVALID	VMID_FIRST_VERSION
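
/*
 * A VMID matches the current allocator generation iff all bits above
 * kvm_arm_vmid_bits agree with vmid_generation; the XOR and right
 * shift below discard the low index bits before the comparison.
 */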
#define vmid_gen_match(vmid) \
	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))

static void flush_context(void)
{
	int cpu;
	u64 vmid;

	bitmap_zero(vmid_map, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
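
		/*
		 * The xchg with 0 both samples and invalidates the CPU's
		 * active VMID. A CPU that already went through a rollover
		 * and has not run a vCPU since reads back 0 here, so the
		 * VMID it last ran is recovered from reserved_vmids below.
		 */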
		/* Preserve reserved VMID */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}

	/*
	 * Unlike the ASID allocator, we expect less frequent rollover in
	 * the case of VMIDs. Hence, instead of marking the CPU as
	 * flush_pending and issuing a local context invalidation on
	 * the next context-switch, we broadcast a TLB flush + I-cache
	 * invalidation over the inner shareable domain on rollover.
	 */
	kvm_call_hyp(__kvm_flush_vm_context);
}

static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved VMIDs looking for a match
	 * and update to use newvmid (i.e. the same VMID in the current
	 * generation).
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_vmids, cpu) == vmid) {
			hit = true;
			per_cpu(reserved_vmids, cpu) = newvmid;
		}
	}

	return hit;
}

static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
	static u32 cur_idx = 1;
	u64 vmid = atomic64_read(&kvm_vmid->id);
	u64 generation = atomic64_read(&vmid_generation);
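
	/*
	 * If the VM already holds a VMID from an older generation, try to
	 * keep the same hardware VMID index in the new generation: either
	 * the index is pinned in some CPU's reserved_vmids, or its bit is
	 * still clear in the current map.
	 */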
	if (vmid != 0) {
		u64 newvmid = generation | (vmid & ~VMID_MASK);

		if (check_update_reserved_vmid(vmid, newvmid)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}

		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}
	}

	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
		goto set_vmid;

	/* We're out of VMIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
						 &vmid_generation);
	flush_context();

	/* We have more VMIDs than CPUs, so this will always succeed */
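	/*
	 * Scan from index 1 rather than cur_idx: the map was just
	 * flushed, and index 0 (vmid #0) stays reserved.
	 */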
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
}

/* Called from vCPU sched out with preemption disabled */
void kvm_arm_vmid_clear_active(void)
{
	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
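
/*
 * Returns true if the VM's VMID had to be refreshed for the current
 * generation, false if the existing VMID was still valid. A sketch of
 * the expected caller pattern in the vCPU run path (the caller names
 * here are assumptions for illustration, not defined in this file):
 *
 *	if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid))
 *		reload_stage2_context();	// hypothetical helper
 */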
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
	unsigned long flags;
	u64 vmid, old_active_vmid;
	bool updated = false;

	vmid = atomic64_read(&kvm_vmid->id);

	/*
	 * Please refer to the comments in check_and_switch_context() in
	 * arch/arm64/mm/context.c.
	 *
	 * Unlike the ASID allocator, we set active_vmids to
	 * VMID_ACTIVE_INVALID on vCPU schedule out to avoid reserving
	 * VMID space needlessly on rollover. Hence we explicitly check
	 * for "!= 0" here to handle the sync with a concurrent rollover.
	 */
	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
					  old_active_vmid, vmid))
		return false;

	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

	/* Check that our VMID belongs to the current generation. */
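	/*
	 * The id must be re-read under the lock: another vCPU of this VM
	 * may have raced with us and already installed a VMID for the
	 * current generation while we were waiting on cpu_vmid_lock.
	 */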
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid)) {
		vmid = new_vmid(kvm_vmid);
		updated = true;
	}

	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);

	return updated;
}

/*
 * Initialize the VMID allocator
 */
int __init kvm_arm_vmid_alloc_init(void)
{
	kvm_arm_vmid_bits = kvm_get_vmid_bits();

	/*
	 * Expect allocation after rollover to fail if we don't have
	 * at least one more VMID than CPUs. VMID #0 is always reserved.
	 */
	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
	vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
	if (!vmid_map)
		return -ENOMEM;

	return 0;
}

void __init kvm_arm_vmid_alloc_free(void)
{
	bitmap_free(vmid_map);
}