commit a614a610ac

If an interrupt is marked with the no balancing flag, we still allow
setting the affinity for such an interrupt from the kernel itself, but
for interrupts which move the affinity from interrupt context via
irq_move_masked_irq() this runs into a check for the no balancing flag,
which in turn ends up with an endless storm of stack dumps because the
move pending flag is not reset.

Allow the move for interrupts which have the no balancing flag set and
clear the move pending bit before checking for interrupts with the per
cpu flag set.

Reported-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1506201002570.4107@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
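
The ordering described above is the whole fix: the move-pending bit must be
cleared before any sanity check can return early. A minimal sketch of the
before/after logic in irq_move_masked_irq(), paraphrased from the description
above (the pre-fix code is reconstructed, not quoted verbatim):

/* Pre-fix (paraphrased): a non-balanceable interrupt bails out before
 * the move-pending bit is cleared, so the very next interrupt takes
 * the same path again -- an endless storm of WARN_ON() stack dumps. */
if (!irqd_can_balance(&desc->irq_data)) {	/* also rejects NO_BALANCING */
	WARN_ON(1);
	return;					/* pending bit still set! */
}
irqd_clr_move_pending(&desc->irq_data);

/* Post-fix (as in the file below): clear the pending bit first, then
 * reject only genuinely per-cpu interrupts; NO_BALANCING may move. */
irqd_clr_move_pending(&desc->irq_data);
if (irqd_is_per_cpu(&desc->irq_data)) {
	WARN_ON(1);
	return;
}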

80 lines | 2.0 KiB | C

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = desc->irq_data.chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

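	/*
	 * Clear the pending bit before any of the checks below can bail
	 * out; otherwise a rejected move would leave the bit set and
	 * retrigger this path on every interrupt (see the commit message
	 * above).
	 */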
	irqd_clr_move_pending(&desc->irq_data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}
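	/*
	 * Interrupts marked with the no balancing flag deliberately fall
	 * through here: the kernel itself may still move them.
	 */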

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
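	/* cpumask_any_and() returns >= nr_cpu_ids when the masks don't intersect. */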
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}

void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
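
For context: irq_move_irq() is intended to be called from a chip's interrupt
ack path (on x86 of this era, apic_ack_edge() calls it). A minimal sketch of
a hypothetical ack callback, purely illustrative and not code from this tree:

static void example_ack_edge(struct irq_data *data)
{
	/*
	 * Perform any pending affinity move; this is a no-op unless
	 * IRQD_SETAFFINITY_PENDING is set. irq_move_irq() masks and
	 * unmasks around irq_move_masked_irq() as required above.
	 */
	irq_move_irq(data);

	/* Chip-specific ack/EOI would follow here. */
}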