commit d340ebd696
The upcoming fix for the -EBUSY return from affinity settings requires using the
irq_move_irq() functionality even on irq remapped interrupts. To avoid the
out-of-line call, move the check for the pending bit into an inline helper
(a sketch follows the tags below).
Preparatory change for the real fix. No functional change.
Fixes: dccfe3147b ("x86/vector: Simplify vector move cleanup")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <liu.song.a23@gmail.com>
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: stable@vger.kernel.org
Cc: Mike Travis <mike.travis@hpe.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Tariq Toukan <tariqt@mellanox.com>
Cc: Dou Liyang <douly.fnst@cn.fujitsu.com>
Link: https://lkml.kernel.org/r/20180604162224.471925894@linutronix.de
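
For illustration, a minimal sketch of the inline helper the commit message describes, assuming it sits next to the other irq_data helpers (e.g. in include/linux/irq.h) and wraps the out-of-line __irq_move_irq() defined in the file below. This is a sketch of the idea, not the verbatim patch:

/*
 * Sketch only: keep the cheap pending-bit check inline so callers pay
 * for the out-of-line call into __irq_move_irq() only when an affinity
 * move is actually pending. Placement is an assumption of this sketch.
 */
void __irq_move_irq(struct irq_data *data);

static inline void irq_move_irq(struct irq_data *data)
{
        if (unlikely(irqd_is_setaffinity_pending(data)))
                __irq_move_irq(data);
}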
120 lines | 3.1 KiB | C
// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:		Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case clear the pending move bit.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
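
For context, a hedged sketch of how a CPU-offline path could consume irq_fixup_move_pending(). The helper name pick_migration_target() is hypothetical and not part of this file; irq_desc_get_irq_data(), irq_desc_get_pending_mask() and irq_data_get_affinity_mask() are existing genirq helpers:

/*
 * Hypothetical helper (sketch only): when a CPU goes down, prefer a
 * still-online pending affinity target so the last affinity request is
 * not lost; otherwise fall back to the current affinity mask.
 */
static const struct cpumask *pick_migration_target(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);

        /* force_clear: clears the pending bit and reports a usable target. */
        if (irq_fixup_move_pending(desc, true))
                return irq_desc_get_pending_mask(desc);

        return irq_data_get_affinity_mask(d);
}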