mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-26 12:34:41 +08:00
14243b3871
This adds basic support for delivering 2 level event channels to a guest. Initially, it only supports delivery via the IRQ routing table, triggered by an eventfd. In order to do so, it has a kvm_xen_set_evtchn_fast() function which will use the pre-mapped shared_info page if it already exists and is still valid, while the slow path through the irqfd_inject workqueue will remap the shared_info page if necessary. It sets the bits in the shared_info page but not the vcpu_info; that is deferred to __kvm_xen_has_interrupt() which raises the vector to the appropriate vCPU. Add a 'verbose' mode to xen_shinfo_test while adding test cases for this. Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Message-Id: <20211210163625.2886-5-dwmw2@infradead.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
153 lines
4.1 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
|
|
* Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
*
|
|
* KVM Xen emulation
|
|
*/
|
|
|
|
#ifndef __ARCH_X86_KVM_XEN_H__
|
|
#define __ARCH_X86_KVM_XEN_H__
|
|
|
|
#ifdef CONFIG_KVM_XEN
|
|
#include <linux/jump_label_ratelimit.h>
|
|
|
|
extern struct static_key_false_deferred kvm_xen_enabled;
|
|
|
|
int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
|
|
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
|
|
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
|
|
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
|
|
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
|
|
int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
|
|
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
|
|
void kvm_xen_init_vm(struct kvm *kvm);
|
|
void kvm_xen_destroy_vm(struct kvm *kvm);
|
|
|
|
int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
|
|
struct kvm *kvm);
|
|
int kvm_xen_setup_evtchn(struct kvm *kvm,
|
|
struct kvm_kernel_irq_routing_entry *e,
|
|
const struct kvm_irq_routing_entry *ue);
|
|
|
|
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
|
|
{
|
|
return static_branch_unlikely(&kvm_xen_enabled.key) &&
|
|
kvm->arch.xen_hvm_config.msr;
|
|
}
|
|
|
|
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
|
|
{
|
|
return static_branch_unlikely(&kvm_xen_enabled.key) &&
|
|
(kvm->arch.xen_hvm_config.flags &
|
|
KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
|
|
}
|
|
|
|
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
|
|
{
|
|
if (static_branch_unlikely(&kvm_xen_enabled.key) &&
|
|
vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector)
|
|
return __kvm_xen_has_interrupt(vcpu);
|
|
|
|
return 0;
|
|
}
|
|
#else
|
|
/*
 * !CONFIG_KVM_XEN stub: always returns non-zero so the MSR write is
 * treated as unhandled by the caller.
 */
static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	return 1;
}
|
|
|
|
/* !CONFIG_KVM_XEN stub: nothing to initialize. */
static inline void kvm_xen_init_vm(struct kvm *kvm)
{
}
|
|
|
|
/* !CONFIG_KVM_XEN stub: nothing to tear down. */
static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
}
|
|
|
|
/* !CONFIG_KVM_XEN stub: Xen MSR handling is never enabled. */
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
	return false;
}
|
|
|
|
/* !CONFIG_KVM_XEN stub: hypercall interception is never enabled. */
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
	return false;
}
|
|
|
|
/* !CONFIG_KVM_XEN stub: no Xen upcall can ever be pending. */
static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu)
{
	return 0;
}
|
|
#endif
|
|
|
|
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
|
|
|
|
#include <asm/pvclock-abi.h>
|
|
#include <asm/xen/interface.h>
|
|
#include <xen/interface/vcpu.h>
|
|
|
|
void kvm_xen_update_runstate_guest(struct kvm_vcpu *vcpu, int state);
|
|
|
|
static inline void kvm_xen_runstate_set_running(struct kvm_vcpu *vcpu)
|
|
{
|
|
kvm_xen_update_runstate_guest(vcpu, RUNSTATE_running);
|
|
}
|
|
|
|
static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
|
|
{
|
|
/*
|
|
* If the vCPU wasn't preempted but took a normal exit for
|
|
* some reason (hypercalls, I/O, etc.), that is accounted as
|
|
* still RUNSTATE_running, as the VMM is still operating on
|
|
* behalf of the vCPU. Only if the VMM does actually block
|
|
* does it need to enter RUNSTATE_blocked.
|
|
*/
|
|
if (vcpu->preempted)
|
|
kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
|
|
}
|
|
|
|
/* 32-bit compatibility definitions, also used natively in 32-bit build */
|
|
/* 32-bit ABI layout of arch_vcpu_info: cr2 plus explicit padding. */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};
|
|
|
|
struct compat_vcpu_info {
|
|
uint8_t evtchn_upcall_pending;
|
|
uint8_t evtchn_upcall_mask;
|
|
uint16_t pad;
|
|
uint32_t evtchn_pending_sel;
|
|
struct compat_arch_vcpu_info arch;
|
|
struct pvclock_vcpu_time_info time;
|
|
}; /* 64 bytes (x86) */
|
|
|
|
/* 32-bit ABI layout of arch_shared_info (p2m bookkeeping, NMI, wallclock hi). */
struct compat_arch_shared_info {
	unsigned int max_pfn;
	unsigned int pfn_to_mfn_frame_list_list;
	unsigned int nmi_reason;
	unsigned int p2m_cr3;
	unsigned int p2m_vaddr;
	unsigned int p2m_generation;
	uint32_t wc_sec_hi;
};
|
|
|
|
struct compat_shared_info {
|
|
struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS];
|
|
uint32_t evtchn_pending[32];
|
|
uint32_t evtchn_mask[32];
|
|
struct pvclock_wall_clock wc;
|
|
struct compat_arch_shared_info arch;
|
|
};
|
|
|
|
/*
 * Number of event channels addressable by a 32-bit guest with the
 * 2-level ABI: one bit per channel in the evtchn_pending bitmap.
 */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
|
|
/*
 * 32-bit ABI layout of vcpu_runstate_info; packed because the 64-bit
 * fields are only 4-byte aligned in the 32-bit Xen ABI.
 */
struct compat_vcpu_runstate_info {
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed));
|
|
|
|
#endif /* __ARCH_X86_KVM_XEN_H__ */
|