x86/irq: Call irq_force_move_complete with irq descriptor
First of all, there is no point in looking up the irq descriptor again, but we also need the descriptor for the final cleanup race fix in the next patch. Make that change separate. No functional difference.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Borislav Petkov <bp@alien8.de>
Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Jeremiah Mahler <jmmahler@gmail.com>
Cc: andy.shevchenko@gmail.com
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: stable@vger.kernel.org #4.3+
Link: http://lkml.kernel.org/r/20151231160107.125211743@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 56d7d2f4bb
commit 90a2282e23
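With the new prototype, a caller that only has a raw irq number has to resolve the descriptor once itself before calling in. A minimal sketch of such an adaptation, assuming a hypothetical caller (example_complete_move() is not part of this patch) running in a context where irq_to_desc() is usable:

/*
 * Sketch only: converting a hypothetical caller from the old int-based
 * prototype to the new descriptor-based one. irq_to_desc() is the
 * generic helper that maps an irq number to its struct irq_desc and
 * may return NULL; irq_force_complete_move() comes from the x86 header
 * changed below.
 */
#include <linux/irq.h>
#include <linux/irqdesc.h>

static void example_complete_move(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		irq_force_complete_move(desc);
}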
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
@@ -630,10 +630,14 @@ void irq_complete_move(struct irq_cfg *cfg)
 	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 
-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
-	struct apic_chip_data *data;
+	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+	struct apic_chip_data *data = apic_chip_data(irqdata);
+	struct irq_cfg *cfg = data ? &data->cfg : NULL;
 
 	if (!cfg)
 		return;
@@ -647,7 +651,6 @@ void irq_force_complete_move(int irq)
 	 * the way out.
 	 */
 	raw_spin_lock(&vector_lock);
-	data = container_of(cfg, struct apic_chip_data, cfg);
 	cpumask_clear_cpu(smp_processor_id(), data->old_domain);
 	raw_spin_unlock(&vector_lock);
 }
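The new comment pins down the calling convention: the caller must already hold desc->lock with interrupts disabled, as the fixup_irqs() call site in the next hunk does. A minimal sketch of honouring that contract, using a hypothetical wrapper (example_force_move_locked() is illustrative, not part of the patch):

#include <linux/irq.h>
#include <linux/irqdesc.h>

/*
 * Sketch only: a hypothetical wrapper that satisfies the documented
 * locking requirement before calling irq_force_complete_move().
 */
static void example_force_move_locked(struct irq_desc *desc)
{
	unsigned long flags;

	/* desc->lock held and interrupts disabled across the call */
	raw_spin_lock_irqsave(&desc->lock, flags);
	irq_force_complete_move(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}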
@@ -462,7 +462,7 @@ void fixup_irqs(void)
 		 * non intr-remapping case, we can't wait till this interrupt
 		 * arrives at this cpu before completing the irq move.
 		 */
-		irq_force_complete_move(irq);
+		irq_force_complete_move(desc);
 
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			break_affinity = 1;