Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "A few ARM fixes:

   - Dietmar Eggemann noticed an issue with IRQ migration during CPU
     hotplug stress testing.

   - Mathieu Desnoyers noticed that a previous fix broke optimised
     kprobes.

   - Robin Murphy noticed a case where we were not clearing the dma_ops"

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8835/1: dma-mapping: Clear DMA ops on teardown
  ARM: 8834/1: Fix: kprobes: optimized kprobes illegal instruction
  ARM: 8824/1: fix a migrating irq bug when hotplug cpu
Linus Torvalds 2019-02-18 09:59:28 -08:00
commit 3ddc14e25e
6 changed files with 5 additions and 65 deletions


@@ -1400,6 +1400,7 @@ config NR_CPUS
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
 	depends on SMP
+	select GENERIC_IRQ_MIGRATION
 	help
 	  Say Y here to experiment with turning CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu.


@@ -25,7 +25,6 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 struct pt_regs;
-extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);


@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
 	return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-	struct irq_chip *c;
-	bool ret = false;
-
-	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
-	 */
-	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-		return false;
-
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
-		ret = true;
-	}
-
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-	return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-	unsigned int i;
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	for_each_irq_desc(i, desc) {
-		bool affinity_broken;
-
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
-
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-					    i, smp_processor_id());
-	}
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */


@@ -254,7 +254,7 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
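
For context on the IRQ-migration change above: selecting GENERIC_IRQ_MIGRATION (see the Kconfig hunk) lets ARM drop its private migrate_irqs() and call the generic irq_migrate_all_off_this_cpu() helper from the CPU-offline path instead. A minimal sketch of that usage follows; example_cpu_disable() and the elided bookkeeping are illustrative stand-ins, not code from this commit.

#include <linux/irq.h>

/* Illustrative CPU-offline step (hypothetical example_cpu_disable(), not
 * the real __cpu_disable()). The generic helper walks every irq_desc and
 * moves interrupts affine to the dying CPU onto another online CPU,
 * breaking user-set affinity only when it has to. */
static int example_cpu_disable(unsigned int cpu)
{
	/* ... mark the CPU offline and tear down per-CPU state ... */

	/* Runs on the CPU going down, with interrupts disabled. */
	irq_migrate_all_off_this_cpu();

	return 0;
}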


@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
 		return;
 
 	arm_teardown_iommu_dma_ops(dev);
+	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+	set_dma_ops(dev, NULL);
 }
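
A note on why the set_dma_ops(dev, NULL) line matters: without it, a device that had IOMMU-backed DMA ops installed at probe time keeps pointing at those ops after arch_teardown_dma_ops() has released the mapping state, so a later re-probe can end up using stale ops. The sketch below illustrates the teardown/re-setup cycle; example_rebind() is hypothetical, and the five-argument arch_setup_dma_ops() signature is the one used by kernels of this vintage.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical illustration of the unbind/rebind cycle this one-liner
 * protects (the driver core drives this, not driver code). */
static void example_rebind(struct device *dev, u64 dma_base, u64 size)
{
	/* Teardown now ends with set_dma_ops(dev, NULL) ... */
	arch_teardown_dma_ops(dev);

	/* ... so the next setup starts from a clean slate and picks ops
	 * that match the current IOMMU configuration. */
	arch_setup_dma_ops(dev, dma_base, size, NULL, false);
}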


@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
 	}
 
 	/* Copy arch-dep-instance from template. */
-	memcpy(code, (unsigned char *)optprobe_template_entry,
+	memcpy(code, (unsigned long *)&optprobe_template_entry,
 	       TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
 	/* Adjust buffer according to instruction. */