KVM: x86: Shove vp_bitmap handling down into sparse_set_to_vcpu_mask()
Move the vp_bitmap "allocation" that's needed to handle mismatched vp_index
values down into sparse_set_to_vcpu_mask() and drop __always_inline from
said helper. The need for an intermediate vp_bitmap is a detail that's
specific to the sparse translation with mismatched VP<=>vCPU indexes and
does not need to be exposed to the caller.
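
Condensed from the kvm_hv_flush_tlb() hunk below (names and arguments taken straight from the diff, unrelated lines elided), the caller-side effect is:

	/* Before: the caller supplies the scratch vp_bitmap and gets back
	 * whichever buffer the helper ended up using.
	 */
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	unsigned long *vcpu_mask;
	...
	vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
					    vp_bitmap, vcpu_bitmap);

	/* After: the helper fills a plain vCPU mask; vp_bitmap is its local detail. */
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
	...
	sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);

With the new signature the callers no longer need any vp_bitmap scratch buffer; it lives on the helper's stack and is only consulted when VP indexes are mismatched.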
Regarding the __always_inline, prior to commit f21dd49450 ("KVM: x86:
hyperv: optimize sparse VP set processing") the helper, then named
hv_vcpu_in_sparse_set(), was a tiny bit of code that effectively boiled
down to a handful of bit ops. The __always_inline was understandable, if
not justifiable. Since the aforementioned change, sparse_set_to_vcpu_mask()
is a chunky 350-450+ bytes of code without KASAN=y, and balloons to 1100+
with KASAN=y. In other words, it has no business being forcefully inlined.
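
For reference, the declaration change described above, shown as prototypes adapted from the hunk below:

	/* Old: forcibly inlined, returns a pointer into caller-provided storage. */
	static __always_inline unsigned long *sparse_set_to_vcpu_mask(
		struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
		u64 *vp_bitmap, unsigned long *vcpu_bitmap);

	/* New: a normal out-of-line helper that fills vcpu_mask directly. */
	static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
					    u64 valid_bank_mask, unsigned long *vcpu_mask);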
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20211207220926.718794-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 9c52f6b3d8
parent 79661c3766
@@ -1713,32 +1713,47 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 	return kvm_hv_get_msr(vcpu, msr, pdata, host);
 }
 
-static __always_inline unsigned long *sparse_set_to_vcpu_mask(
-	struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
-	u64 *vp_bitmap, unsigned long *vcpu_bitmap)
+static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
+				    u64 valid_bank_mask, unsigned long *vcpu_mask)
 {
 	struct kvm_hv *hv = to_kvm_hv(kvm);
+	bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
+	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 	struct kvm_vcpu *vcpu;
 	int bank, sbank = 0;
 	unsigned long i;
+	u64 *bitmap;
 
-	memset(vp_bitmap, 0,
-	       KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
+	BUILD_BUG_ON(sizeof(vp_bitmap) >
+		     sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
+
+	/*
+	 * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
+	 * fill a temporary buffer and manually test each vCPU's VP index.
+	 */
+	if (likely(!has_mismatch))
+		bitmap = (u64 *)vcpu_mask;
+	else
+		bitmap = vp_bitmap;
+
+	/*
+	 * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
+	 * having a '1' for each bank that exists in sparse_banks. Sets must
+	 * be in ascending order, i.e. bank0..bankN.
+	 */
+	memset(bitmap, 0, sizeof(vp_bitmap));
 	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
 			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
-		vp_bitmap[bank] = sparse_banks[sbank++];
+		bitmap[bank] = sparse_banks[sbank++];
 
-	if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
-		/* for all vcpus vp_index == vcpu_idx */
-		return (unsigned long *)vp_bitmap;
-	}
+	if (likely(!has_mismatch))
+		return;
 
-	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
+	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
-			__set_bit(i, vcpu_bitmap);
+			__set_bit(i, vcpu_mask);
 	}
-	return vcpu_bitmap;
 }
 
 struct kvm_hv_hcall {
@@ -1775,9 +1790,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 	struct kvm *kvm = vcpu->kvm;
 	struct hv_tlb_flush_ex flush_ex;
 	struct hv_tlb_flush flush;
-	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
-	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
-	unsigned long *vcpu_mask;
+	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
 	u64 valid_bank_mask;
 	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 	bool all_cpus;
@@ -1870,11 +1883,9 @@ do_flush:
 	if (all_cpus) {
 		kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST);
 	} else {
-		vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
-						    vp_bitmap, vcpu_bitmap);
+		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
 
-		kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
-					    vcpu_mask);
+		kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, vcpu_mask);
 	}
 
 ret_success:
@@ -1907,9 +1918,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 	struct kvm *kvm = vcpu->kvm;
 	struct hv_send_ipi_ex send_ipi_ex;
 	struct hv_send_ipi send_ipi;
-	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
-	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
-	unsigned long *vcpu_mask;
+	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
 	unsigned long valid_bank_mask;
 	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 	u32 vector;
@@ -1965,11 +1974,13 @@ check_and_send_ipi:
 	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
 		return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
-	vcpu_mask = all_cpus ? NULL :
-		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
-					vp_bitmap, vcpu_bitmap);
+	if (all_cpus) {
+		kvm_send_ipi_to_many(kvm, vector, NULL);
+	} else {
+		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
 
-	kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
+		kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
+	}
 
 ret_success:
 	return HV_STATUS_SUCCESS;