KVM: Don't actually set a request when evicting vCPUs for GFN cache invd
Don't actually set a request bit in vcpu->requests when making a request purely to force a vCPU to exit the guest. Logging a request but not actually consuming it would cause the vCPU to get stuck in an infinite loop during KVM_RUN, because KVM would see the pending request and bail from VM-Enter to service the request.

Note, it's currently impossible for KVM to set KVM_REQ_GPC_INVALIDATE as nothing in KVM is wired up to set guest_uses_pa=true. But it'd be all too easy for arch code to introduce use of kvm_gfn_to_pfn_cache_init() without implementing handling of the request, especially since getting test coverage of MMU notifier interaction with specific KVM features usually requires a directed test.

Opportunistically rename gfn_to_pfn_cache_invalidate_start()'s wake_vcpus to evict_vcpus. The purpose of the request is to get vCPUs out of guest mode; it's supposed to _avoid_ waking vCPUs that are blocking.

Opportunistically rename KVM_REQ_GPC_INVALIDATE to be more specific as to what it wants to accomplish, and to genericize the name so that it can be used for similar but unrelated scenarios, should they arise in the future.

Add a comment and documentation to explain why the "no action" request exists.

Add compile-time assertions to help detect improper usage. Use the inner assertless helper in the one s390 path that makes requests without a hardcoded request.

Cc: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220223165302.3205276-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 79593c086e
commit df06dae3f2
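To make the failure mode described in the commit message concrete, here is a minimal standalone C model (userspace, not kernel code; the names vcpu_requests, REQ_EVICT, REQ_NO_ACTION and try_enter_guest are invented for this sketch). A request bit that nothing is wired up to consume keeps a KVM_RUN-style loop from ever entering the guest:

    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_EVICT      (1u << 0)   /* a request some handler clears */
    #define REQ_NO_ACTION  (1u << 1)   /* "no action": nothing ever clears it */

    static unsigned long vcpu_requests;

    /* Model of the vcpu_run() loop: service pending requests, then enter the guest. */
    static bool try_enter_guest(void)
    {
            if (vcpu_requests & REQ_EVICT) {
                    vcpu_requests &= ~REQ_EVICT;   /* the handler consumes this bit */
                    return false;                  /* bail from VM-Enter for this pass */
            }
            if (vcpu_requests)                     /* a bit nobody is wired up to handle */
                    return false;                  /* ...so the loop bails forever */
            return true;                           /* no pending requests: enter the guest */
    }

    int main(void)
    {
            vcpu_requests |= REQ_NO_ACTION;        /* the bug: logging a no-action request */
            for (int i = 0; i < 5; i++)
                    printf("attempt %d: entered guest = %d\n", i, try_enter_guest());
            return 0;
    }

Every attempt prints 0: the unconsumed bit stays set indefinitely, which is exactly why a request made purely to force a vCPU out of guest mode must not be logged in vcpu->requests.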
Documentation/virt/kvm/vcpu-requests.rst
@@ -135,6 +135,16 @@ KVM_REQ_UNHALT
   such as a pending signal, which does not indicate the VCPU's halt
   emulation should stop, and therefore does not make the request.

+KVM_REQ_OUTSIDE_GUEST_MODE
+
+  This "request" ensures the target vCPU has exited guest mode prior to the
+  sender of the request continuing on.  No action needs be taken by the target,
+  and so no request is actually logged for the target.  This request is similar
+  to a "kick", but unlike a kick it guarantees the vCPU has actually exited
+  guest mode.  A kick only guarantees the vCPU will exit at some point in the
+  future, e.g. a previous kick may have started the process, but there's no
+  guarantee the to-be-kicked vCPU has fully exited guest mode.
+
 KVM_REQUEST_MASK
 ----------------

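For readers of the new documentation entry, a hedged sender-side sketch of how such a "request" is issued, distilled from the pfncache.c hunks later in this diff (kernel-context fragment, not a standalone program; kvm_make_vcpus_request_mask(), may_block and vcpu_bitmap are taken from that code path and assumed here):

    unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;

    /* Can't sleep waiting for IPI acknowledgement if blocking isn't allowed. */
    if (!may_block)
            req &= ~KVM_REQUEST_WAIT;

    /*
     * Because the request carries KVM_REQUEST_NO_ACTION, no bit is logged in
     * vcpu->requests; the targeted vCPUs are only forced out of guest mode and
     * have nothing to clear afterwards.
     */
    kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);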
arch/s390/kvm/kvm-s390.c
@@ -3463,7 +3463,7 @@ void exit_sie(struct kvm_vcpu *vcpu)
 /* Kick a guest cpu out of SIE to process a request synchronously */
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-        kvm_make_request(req, vcpu);
+        __kvm_make_request(req, vcpu);
         kvm_s390_vcpu_request(vcpu);
 }

include/linux/kvm_host.h
@@ -148,6 +148,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQUEST_MASK           GENMASK(7,0)
 #define KVM_REQUEST_NO_WAKEUP      BIT(8)
 #define KVM_REQUEST_WAIT           BIT(9)
+#define KVM_REQUEST_NO_ACTION      BIT(10)
 /*
  * Architecture-independent vcpu->requests bit members
  * Bits 4-7 are reserved for more arch-independent bits.
@@ -156,9 +157,18 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_VM_DEAD            (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_UNBLOCK            2
 #define KVM_REQ_UNHALT             3
-#define KVM_REQ_GPC_INVALIDATE     (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQUEST_ARCH_BASE      8

+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists is purely as way to force the vCPU to
+ * OUTSIDE_GUEST_MODE.  KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on.  A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
+ */
+#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
         BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
         (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
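To make the flag layout above concrete, a small standalone C model (values copied from the hunk above; this is not kernel code). It shows that KVM_REQ_OUTSIDE_GUEST_MODE is composed purely of behavior flags and has no request number of its own in the low byte covered by KVM_REQUEST_MASK:

    #include <stdio.h>

    /* Mirrors the #defines in the hunk above. */
    #define KVM_REQUEST_MASK           0xffu           /* GENMASK(7,0) */
    #define KVM_REQUEST_NO_WAKEUP      (1u << 8)
    #define KVM_REQUEST_WAIT           (1u << 9)
    #define KVM_REQUEST_NO_ACTION      (1u << 10)

    #define KVM_REQ_VM_DEAD            (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
    #define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

    int main(void)
    {
            /* An ordinary request carries a bit number in the low byte... */
            printf("VM_DEAD logs bit:            %u\n",
                   KVM_REQ_VM_DEAD & KVM_REQUEST_MASK);
            /* ...the "no action" request deliberately has none assigned. */
            printf("OUTSIDE_GUEST_MODE low byte: %u\n",
                   KVM_REQ_OUTSIDE_GUEST_MODE & KVM_REQUEST_MASK);
            return 0;
    }

Output is 1 and 0 respectively: the "request" is nothing but flags, so logging it in vcpu->requests would be meaningless at best, hence the compile-time check added to kvm_make_request() below.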
@@ -1222,7 +1232,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * @vcpu:          vCPU to be used for marking pages dirty and to be woken on
  *                 invalidation.
  * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- *                 @vcpu is IN_GUEST_MODE so invalidations should wake it.
+ *                 @vcpu is IN_GUEST_MODE; invalidations of the cache from MMU
+ *                 notifiers (but not for KVM memslot changes!) will also force
+ *                 @vcpu to exit the guest to refresh the cache.
  * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
  * @gpa:           guest physical address to map.
  * @len:           sanity check; the range being access must fit a single page.
@@ -1233,10 +1245,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  *                 -EFAULT for an untranslatable guest physical address.
  *
  * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
- * invalidations to be processed.  Invalidation callbacks to @vcpu using
- * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM
- * memslot changes.  Callers are required to use kvm_gfn_to_pfn_cache_check()
- * to ensure that the cache is valid before accessing the target page.
+ * invalidations to be processed.  Callers are required to use
+ * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
+ * accessing the target page.
  */
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
                               struct kvm_vcpu *vcpu, bool guest_uses_pa,
@@ -1984,7 +1995,7 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)

 void kvm_arch_irq_routing_update(struct kvm *kvm);

-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
         /*
          * Ensure the rest of the request is published to kvm_check_request's
@@ -1994,6 +2005,19 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
         set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
 }

+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+        /*
+         * Request that don't require vCPU action should never be logged in
+         * vcpu->requests.  The vCPU won't clear the request, so it will stay
+         * logged indefinitely and prevent the vCPU from entering the guest.
+         */
+        BUILD_BUG_ON(!__builtin_constant_p(req) ||
+                     (req & KVM_REQUEST_NO_ACTION));
+
+        __kvm_make_request(req, vcpu);
+}
+
 static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
 {
         return READ_ONCE(vcpu->requests);
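The new assertion relies on BUILD_BUG_ON to turn misuse into a compile failure. A portable userspace approximation (not the kernel macro; it uses _Static_assert and, unlike the real helper, does not also require req to be a compile-time constant via __builtin_constant_p):

    #include <stdio.h>

    #define REQUEST_NO_ACTION   (1u << 10)   /* stand-in for KVM_REQUEST_NO_ACTION */
    #define REQ_UNBLOCK         2u           /* an ordinary, actionable request    */
    #define REQ_OUTSIDE_GUEST   (REQUEST_NO_ACTION | (1u << 9) | (1u << 8))

    /* Reject "no action" requests at compile time, as kvm_make_request() now does. */
    #define make_request(req)                                                \
            do {                                                             \
                    _Static_assert(!((req) & REQUEST_NO_ACTION),             \
                                   "no-action requests must not be logged"); \
                    printf("logged request %#x\n", (unsigned)(req));         \
            } while (0)

    int main(void)
    {
            make_request(REQ_UNBLOCK);             /* compiles and runs     */
            /* make_request(REQ_OUTSIDE_GUEST); */ /* would fail to compile */
            return 0;
    }

The constant-ness half of the real check is what pushes the s390 kvm_s390_sync_request() path, which receives req as a runtime argument, onto the inner __kvm_make_request() helper shown earlier in this diff.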
virt/kvm/kvm_main.c
@@ -253,7 +253,8 @@ static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
 {
         int cpu;

-        kvm_make_request(req, vcpu);
+        if (likely(!(req & KVM_REQUEST_NO_ACTION)))
+                __kvm_make_request(req, vcpu);

         if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                 return;
virt/kvm/pfncache.c
@@ -27,7 +27,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 {
         DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
         struct gfn_to_pfn_cache *gpc;
-        bool wake_vcpus = false;
+        bool evict_vcpus = false;

         spin_lock(&kvm->gpc_lock);
         list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -40,11 +40,11 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,

                         /*
                          * If a guest vCPU could be using the physical address,
-                         * it needs to be woken.
+                         * it needs to be forced out of guest mode.
                          */
                         if (gpc->guest_uses_pa) {
-                                if (!wake_vcpus) {
-                                        wake_vcpus = true;
+                                if (!evict_vcpus) {
+                                        evict_vcpus = true;
                                         bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                                 }
                                 __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
@@ -67,14 +67,18 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
         }
         spin_unlock(&kvm->gpc_lock);

-        if (wake_vcpus) {
-                unsigned int req = KVM_REQ_GPC_INVALIDATE;
+        if (evict_vcpus) {
+                /*
+                 * KVM needs to ensure the vCPU is fully out of guest context
+                 * before allowing the invalidation to continue.
+                 */
+                unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
                 bool called;

                 /*
                  * If the OOM reaper is active, then all vCPUs should have
                  * been stopped already, so perform the request without
-                 * KVM_REQUEST_WAIT and be sad if any needed to be woken.
+                 * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
                  */
                 if (!may_block)
                         req &= ~KVM_REQUEST_WAIT;