Mirror of https://mirrors.bfsu.edu.cn/git/linux.git
Synced 2024-11-11 12:28:41 +08:00
KVM: arm/arm64: vgic: Make vgic_cpu->ap_list_lock a raw_spinlock
vgic_cpu->ap_list_lock must always be taken with interrupts disabled as
it is used in interrupt context. For configurations such as
PREEMPT_RT_FULL, this means that it should be a raw_spinlock, since RT
spinlocks are interruptible.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
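Why raw: under PREEMPT_RT, a plain spinlock_t is backed by a sleeping
rtmutex and must not be taken from hard interrupt context, whereas a
raw_spinlock_t always busy-waits with interrupts and preemption
disabled. A minimal sketch of the pattern the patch converts to (the
lock name and function here are hypothetical, not from the patch):

	#include <linux/spinlock.h>

	/* Hypothetical lock standing in for vgic_cpu->ap_list_lock. */
	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_irq_path(void)
	{
		unsigned long flags;

		/* Safe in an ISR on both !RT and PREEMPT_RT kernels. */
		raw_spin_lock_irqsave(&example_lock, flags);
		/* ... touch the data example_lock protects ... */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}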
parent fc3bc47523
commit e08d8d2960
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
@@ -307,7 +307,7 @@ struct vgic_cpu {
 	unsigned int used_lrs;
 	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
 
-	spinlock_t ap_list_lock;	/* Protects the ap_list */
+	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */
 
 	/*
 	 * List of IRQs that this VCPU should consider because they are either
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-	spin_lock_init(&vgic_cpu->ap_list_lock);
+	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 
 	/*
 	 * Enable and configure all SGIs to be edge-triggered and
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * When taking more than one ap_list_lock at the same time, always take the
  * lowest numbered VCPU's ap_list_lock first, so:
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  *
  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
  * spinlocks for any lock that may be taken while injecting an interrupt.
  */
 
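For context, the ordering rule this comment states is the one
vgic_prune_ap_list implements further down: when an interrupt has to
migrate between two VCPUs' ap_lists, both locks are taken, always
starting with the lower vcpu_id. A condensed sketch of that pattern
(variable names follow the code in the hunks below):

	struct kvm_vcpu *vcpuA, *vcpuB;	/* vcpuA gets the smaller vcpu_id */

	if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
		vcpuA = vcpu;
		vcpuB = target_vcpu;
	} else {
		vcpuA = target_vcpu;
		vcpuB = vcpu;
	}

	raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
	raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
			     SINGLE_DEPTH_NESTING);

Taking the locks in a global, stable order means two CPUs migrating
interrupts in opposite directions can never hold one lock each while
waiting on the other.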
@@ -351,7 +351,7 @@ retry:
 
 	/* someone can do stuff here, which we re-check below */
 
-	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 	raw_spin_lock(&irq->irq_lock);
 
 	/*
@@ -368,7 +368,8 @@ retry:
 
 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+					   flags);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		goto retry;
@@ -383,7 +384,7 @@ retry:
 	irq->vcpu = vcpu;
 
 	raw_spin_unlock(&irq->irq_lock);
-	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -597,7 +598,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-	spin_lock(&vgic_cpu->ap_list_lock);
+	raw_spin_lock(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -638,7 +639,7 @@ retry:
 		/* This interrupt looks like it has to be migrated. */
 
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock(&vgic_cpu->ap_list_lock);
+		raw_spin_unlock(&vgic_cpu->ap_list_lock);
 
 		/*
 		 * Ensure locking order by always locking the smallest
@@ -652,9 +653,9 @@ retry:
 			vcpuB = vcpu;
 		}
 
-		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
-		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
-				 SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+				      SINGLE_DEPTH_NESTING);
 		raw_spin_lock(&irq->irq_lock);
 
 		/*
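A note on the _nested variant this hunk converts: both ap_list_locks
are the same struct field in two different vgic_cpu instances, so
lockdep puts them in one lock class, and a second plain raw_spin_lock()
would look like recursive locking. SINGLE_DEPTH_NESTING marks the inner
acquisition as a deliberate, ordered nesting. Generic sketch with
hypothetical objects a and b of the same type:

	/* a and b contain the same lock field, hence one lockdep class */
	raw_spin_lock(&a->lock);		/* outer lock, taken first */
	raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING); /* inner */
	/* ... critical section over both objects ... */
	raw_spin_unlock(&b->lock);
	raw_spin_unlock(&a->lock);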
@@ -676,8 +677,8 @@ retry:
 		}
 
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
 		if (target_vcpu_needs_kick) {
 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
 		goto retry;
 	}
 
-	spin_unlock(&vgic_cpu->ap_list_lock);
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 	vgic_flush_lr_state(vcpu);
-	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
@@ -918,7 +919,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
 	vgic_get_vmcr(vcpu, &vmcr);
 
-	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		raw_spin_lock(&irq->irq_lock);
@@ -931,7 +932,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 			break;
 	}
 
-	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
 	return pending;
 }