KVM: PPC: Book 3S: XICS: Don't lock twice when checking for resend

This patch improves code that previously took the ICS lock twice, once to
check the resend flag and once to do the actual resend. The resend flag is
now checked locklessly, and a boolean parameter check_resend is added to
icp_[rm_]deliver_irq() so that the flag can be rechecked under the lock
while the delivery is done.
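
The shape of the change in ics_[rm_]check_resend() is easiest to see side by
side. The following is a condensed sketch of the loop before and after, taken
from the diff below (blank lines, the XICS_DBG tracing, and the
local_irq_save/restore pairing of the virtual-mode copy are elided; both the
real-mode and virtual-mode variants change identically):

	/* Before: hold ics->lock across the scan, dropping and
	 * retaking it around every delivery. */
	arch_spin_lock(&ics->lock);
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;
		state->resend = 0;
		arch_spin_unlock(&ics->lock);
		icp_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}
	arch_spin_unlock(&ics->lock);

	/* After: scan locklessly; check_resend = true tells
	 * icp_deliver_irq() to recheck and clear state->resend under
	 * ics->lock as part of the delivery itself. */
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (state->resend)
			icp_deliver_irq(xics, icp, state->number, true);
	}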

We need to make sure that when we clear an ICS's bit in the ICP's
resend_map, we don't miss the resend flag of any irq that set the bit. This
is ordered by the barrier implied by test_and_clear_bit(), together with a
newly added smp_wmb() between setting the irq's resend flag and setting the
bit in the ICP's resend_map.
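
Concretely, the writer/reader pairing looks like this (a condensed sketch:
the writer is the failed-delivery path in icp_[rm_]deliver_irq(), and the
reader combines icp_[rm_]check_resend() with ics_[rm_]check_resend();
locking and the not-present-ICS checks are omitted):

	/* Writer: delivery failed, flag this irq for resend, then
	 * publish the ICS in the ICP's resend_map. */
	state->resend = 1;
	smp_wmb();	/* order the flag before the map bit */
	set_bit(ics->icsid, icp->resend_map);

	/* Reader: test_and_clear_bit() implies a full barrier, so once
	 * the map bit has been observed (and cleared), every
	 * state->resend written before it is visible below. */
	if (test_and_clear_bit(ics->icsid, icp->resend_map)) {
		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *state = &ics->irq_state[i];

			if (state->resend)
				icp_deliver_irq(xics, icp, state->number, true);
		}
	}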

Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Author:     Li Zhong <zhong@linux.vnet.ibm.com>  2016-11-11 12:57:36 +08:00
Committer:  Paul Mackerras <paulus@ozlabs.org>
commit 21acd0e4df (parent 17d48610ae)
2 changed files with 48 additions and 51 deletions

arch/powerpc/kvm/book3s_hv_rm_xics.c

@@ -35,7 +35,7 @@ int kvm_irq_bypass = 1;
 EXPORT_SYMBOL(kvm_irq_bypass);
 
 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
-			u32 new_irq);
+			u32 new_irq, bool check_resend);
 static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);
 
 /* -- ICS routines -- */
@@ -44,22 +44,12 @@ static void ics_rm_check_resend(struct kvmppc_xics *xics,
 {
 	int i;
 
-	arch_spin_lock(&ics->lock);
-
 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
 		struct ics_irq_state *state = &ics->irq_state[i];
 
-		if (!state->resend)
-			continue;
-
-		state->resend = 0;
-
-		arch_spin_unlock(&ics->lock);
-		icp_rm_deliver_irq(xics, icp, state->number);
-		arch_spin_lock(&ics->lock);
+		if (state->resend)
+			icp_rm_deliver_irq(xics, icp, state->number, true);
 	}
-
-	arch_spin_unlock(&ics->lock);
 }
 
 /* -- ICP routines -- */
@@ -292,7 +282,7 @@ static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
 }
 
 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
-			u32 new_irq)
+			u32 new_irq, bool check_resend)
 {
 	struct ics_irq_state *state;
 	struct kvmppc_ics *ics;
@@ -337,6 +327,10 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 		}
 	}
 
+	if (check_resend)
+		if (!state->resend)
+			goto out;
+
 	/* Clear the resend bit of that interrupt */
 	state->resend = 0;
 
@@ -384,6 +378,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 			arch_spin_unlock(&ics->lock);
 			icp->n_reject++;
 			new_irq = reject;
+			check_resend = 0;
 			goto again;
 		}
 	} else {
@@ -391,9 +386,15 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 		 * We failed to deliver the interrupt we need to set the
 		 * resend map bit and mark the ICS state as needing a resend
 		 */
-		set_bit(ics->icsid, icp->resend_map);
 		state->resend = 1;
+
+		/*
+		 * Make sure when checking resend, we don't miss the resend
+		 * if resend_map bit is seen and cleared.
+		 */
+		smp_wmb();
+		set_bit(ics->icsid, icp->resend_map);
 
 		/*
 		 * If the need_resend flag got cleared in the ICP some time
 		 * between icp_rm_try_to_deliver() atomic update and now, then
@@ -404,6 +405,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 		if (!icp->state.need_resend) {
 			state->resend = 0;
 			arch_spin_unlock(&ics->lock);
+			check_resend = 0;
 			goto again;
 		}
 	}
@@ -598,7 +600,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	/* Handle reject in real mode */
 	if (reject && reject != XICS_IPI) {
 		this_icp->n_reject++;
-		icp_rm_deliver_irq(xics, icp, reject);
+		icp_rm_deliver_irq(xics, icp, reject, false);
 	}
 
 	/* Handle resends in real mode */
@@ -666,7 +668,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 	 */
 	if (reject && reject != XICS_IPI) {
 		icp->n_reject++;
-		icp_rm_deliver_irq(xics, icp, reject);
+		icp_rm_deliver_irq(xics, icp, reject, false);
 	}
 
  bail:
 	return check_too_hard(xics, icp);
@@ -704,7 +706,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
 	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
 
 	if (pq_new & PQ_PRESENTED)
-		icp_rm_deliver_irq(xics, NULL, irq);
+		icp_rm_deliver_irq(xics, NULL, irq, false);
 
 	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
 		icp->rm_action |= XICS_RM_NOTIFY_EOI;
@@ -874,7 +876,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
 
 	/* Test P=1, Q=0, this is the only case where we present */
 	if (pq_new == PQ_PRESENTED)
-		icp_rm_deliver_irq(xics, icp, irq);
+		icp_rm_deliver_irq(xics, icp, irq, false);
 
 	/* EOI the interrupt */
 	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,

arch/powerpc/kvm/book3s_xics.c

@@ -63,7 +63,7 @@
 /* -- ICS routines -- */
 
 static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
-			    u32 new_irq);
+			    u32 new_irq, bool check_resend);
 
 /*
  * Return value ideally indicates how the interrupt was handled, but no
@@ -117,7 +117,7 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
 
 	/* Test P=1, Q=0, this is the only case where we present */
 	if (pq_new == PQ_PRESENTED)
-		icp_deliver_irq(xics, NULL, irq);
+		icp_deliver_irq(xics, NULL, irq, false);
 
 	/* Record which CPU this arrived on for passed-through interrupts */
 	if (state->host_irq)
@@ -131,31 +131,14 @@ static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
 {
 	int i;
 	unsigned long flags;
 
-	local_irq_save(flags);
-	arch_spin_lock(&ics->lock);
-
 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
 		struct ics_irq_state *state = &ics->irq_state[i];
 
-		if (!state->resend)
-			continue;
-
-		state->resend = 0;
-
-		XICS_DBG("resend %#x prio %#x\n", state->number,
-			 state->priority);
-
-		arch_spin_unlock(&ics->lock);
-		local_irq_restore(flags);
-		icp_deliver_irq(xics, icp, state->number);
-		local_irq_save(flags);
-		arch_spin_lock(&ics->lock);
+		if (state->resend) {
+			XICS_DBG("resend %#x prio %#x\n", state->number,
+				 state->priority);
+			icp_deliver_irq(xics, icp, state->number, true);
+		}
 	}
-
-	arch_spin_unlock(&ics->lock);
-	local_irq_restore(flags);
 }
 
 static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
@@ -209,7 +192,7 @@ int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
 		 state->masked_pending, state->resend);
 
 	if (write_xive(xics, ics, state, server, priority, priority))
-		icp_deliver_irq(xics, icp, irq);
+		icp_deliver_irq(xics, icp, irq, false);
 
 	return 0;
 }
@@ -262,7 +245,7 @@ int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
 
 	if (write_xive(xics, ics, state, state->server, state->saved_priority,
 		       state->saved_priority))
-		icp_deliver_irq(xics, icp, irq);
+		icp_deliver_irq(xics, icp, irq, false);
 
 	return 0;
 }
@@ -396,7 +379,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
 }
 
 static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
-			    u32 new_irq)
+			    u32 new_irq, bool check_resend)
 {
 	struct ics_irq_state *state;
 	struct kvmppc_ics *ics;
@@ -442,6 +425,10 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 		}
 	}
 
+	if (check_resend)
+		if (!state->resend)
+			goto out;
+
 	/* Clear the resend bit of that interrupt */
 	state->resend = 0;
 
@@ -490,6 +477,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 			arch_spin_unlock(&ics->lock);
 			local_irq_restore(flags);
 			new_irq = reject;
+			check_resend = 0;
 			goto again;
 		}
 	} else {
@@ -497,9 +485,15 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 		 * We failed to deliver the interrupt we need to set the
 		 * resend map bit and mark the ICS state as needing a resend
 		 */
-		set_bit(ics->icsid, icp->resend_map);
 		state->resend = 1;
+
+		/*
+		 * Make sure when checking resend, we don't miss the resend
+		 * if resend_map bit is seen and cleared.
+		 */
+		smp_wmb();
+		set_bit(ics->icsid, icp->resend_map);
 
 		/*
 		 * If the need_resend flag got cleared in the ICP some time
 		 * between icp_try_to_deliver() atomic update and now, then
@@ -511,6 +505,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 			state->resend = 0;
 			arch_spin_unlock(&ics->lock);
 			local_irq_restore(flags);
+			check_resend = 0;
 			goto again;
 		}
 	}
@@ -702,7 +697,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 
 	/* Handle reject */
 	if (reject && reject != XICS_IPI)
-		icp_deliver_irq(xics, icp, reject);
+		icp_deliver_irq(xics, icp, reject, false);
 
 	/* Handle resend */
 	if (resend)
@@ -782,7 +777,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 	 * attempt (see comments in icp_deliver_irq).
 	 */
 	if (reject && reject != XICS_IPI)
-		icp_deliver_irq(xics, icp, reject);
+		icp_deliver_irq(xics, icp, reject, false);
 }
 
 static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
@@ -818,7 +813,7 @@ static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
 	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
 
 	if (pq_new & PQ_PRESENTED)
-		icp_deliver_irq(xics, icp, irq);
+		icp_deliver_irq(xics, icp, irq, false);
 
 	kvm_notify_acked_irq(vcpu->kvm, 0, irq);
@@ -1307,7 +1302,7 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
 	local_irq_restore(flags);
 
 	if (val & KVM_XICS_PENDING)
-		icp_deliver_irq(xics, NULL, irqp->number);
+		icp_deliver_irq(xics, NULL, irqp->number, false);
 
 	return 0;
 }