KVM: x86: check_nested_events is never NULL
Both Intel and AMD now implement it, so there is no need to check if the callback is implemented.

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 56083bdf67
parent 1d2c6c9bd4
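To see why the NULL check becomes dead code, here is a minimal, self-contained C sketch of the callback-table pattern; the names (struct x86_ops, vmx_/svm_check_nested_events, the simplified inject_pending_event) are illustrative stand-ins, not the kernel's actual definitions. Once every backend installs the pointer at init time, callers may invoke it unconditionally:

#include <stdio.h>

struct x86_ops {
	/* Now mandatory: every backend fills this in. */
	int (*check_nested_events)(int vcpu_id);
};

/* Intel-style backend implementation. */
static int vmx_check_nested_events(int vcpu_id)
{
	printf("vmx: nested events checked for vcpu %d\n", vcpu_id);
	return 0;
}

/* AMD-style backend implementation. */
static int svm_check_nested_events(int vcpu_id)
{
	printf("svm: nested events checked for vcpu %d\n", vcpu_id);
	return 0;
}

/* Filled in with one backend's table when the module loads. */
static struct x86_ops ops;

static int inject_pending_event(int vcpu_id, int in_guest_mode)
{
	/*
	 * Before the patch: if (in_guest_mode && ops.check_nested_events)
	 * After the patch: the pointer is guaranteed non-NULL, so the
	 * guard on the callback can be dropped.
	 */
	if (in_guest_mode)
		return ops.check_nested_events(vcpu_id);
	return 0;
}

int main(int argc, char **argv)
{
	/* Pick a backend at init time, as KVM does for vmx vs. svm. */
	ops.check_nested_events = (argc > 1) ? svm_check_nested_events
					     : vmx_check_nested_events;
	return inject_pending_event(1, 1);
}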
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7699,7 +7699,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	 * from L2 to L1 due to pending L1 events which require exit
 	 * from L2 to L1.
 	 */
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+	if (is_guest_mode(vcpu)) {
 		r = kvm_x86_ops.check_nested_events(vcpu);
 		if (r != 0)
 			return r;
@@ -7761,7 +7761,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	 * proposal and current concerns. Perhaps we should be setting
 	 * KVM_REQ_EVENT only on certain events and not unconditionally?
 	 */
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+	if (is_guest_mode(vcpu)) {
 		r = kvm_x86_ops.check_nested_events(vcpu);
 		if (r != 0)
 			return r;
@@ -8527,7 +8527,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events)
+	if (is_guest_mode(vcpu))
 		kvm_x86_ops.check_nested_events(vcpu);
 
 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&