
Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer fixes from Ingo Molnar:
 "Main changes:

  - Fix the deadlock reported by Dave Jones et al
  - Clean up and fix nohz_full interaction with arch abilities
  - nohz init code consolidation/cleanup"

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  nohz: nohz full depends on irq work self IPI support
  nohz: Consolidate nohz full init code
  arm64: Tell irq work about self IPI support
  arm: Tell irq work about self IPI support
  x86: Tell irq work about self IPI support
  irq_work: Force raised irq work to run on irq work interrupt
  irq_work: Introduce arch_irq_work_has_interrupt()
  nohz: Move nohz full init call to tick init
Linus Torvalds 2014-10-09 06:30:57 -04:00
commit afa3536be8
43 changed files with 133 additions and 29 deletions
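Most of the per-architecture churn below is mechanical: every architecture now has to answer arch_irq_work_has_interrupt(). Architectures without an irq work self-IPI simply add "generic-y += irq_work.h" to their Kbuild and inherit the new asm-generic header, which returns false; arm, arm64 and x86 provide their own header instead. As a rough illustration only (the "foo" architecture, its IPI number and send helper are invented and not part of this series), a new port that does have a self-IPI would add something like:

/* arch/foo/include/asm/irq_work.h -- hypothetical architecture */
#ifndef __ASM_FOO_IRQ_WORK_H
#define __ASM_FOO_IRQ_WORK_H

/* Report whether this arch can interrupt itself to run irq work. */
static inline bool arch_irq_work_has_interrupt(void)
{
	return true;
}

#endif /* __ASM_FOO_IRQ_WORK_H */

/* arch/foo/kernel/smp.c -- hypothetical; IPI_IRQ_WORK and the helper are made up */
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		foo_send_self_ipi(IPI_IRQ_WORK);
}
#endif

Architectures that cannot raise such an interrupt need no arch_irq_work_raise() at all: the weak no-op default in kernel/irq_work.c stays in place and raised work is picked up from the next timer tick via the new irq_work_tick().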

View File

@ -4,6 +4,7 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h

View File

@ -18,6 +18,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += local.h

View File

@ -0,0 +1,11 @@
#ifndef __ASM_ARM_IRQ_WORK_H
#define __ASM_ARM_IRQ_WORK_H
#include <asm/smp_plat.h>
static inline bool arch_irq_work_has_interrupt(void)
{
return is_smp();
}
#endif /* _ASM_ARM_IRQ_WORK_H */

View File

@@ -499,7 +499,7 @@ void arch_send_call_function_single_ipi(int cpu)
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
-	if (is_smp())
+	if (arch_irq_work_has_interrupt())
 		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
 }
 #endif

View File

@@ -9,8 +9,8 @@ generic-y += current.h
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
-generic-y += emergency-restart.h
 generic-y += early_ioremap.h
+generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += ftrace.h
 generic-y += hash.h

View File

@ -0,0 +1,11 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
#include <asm/smp.h>
static inline bool arch_irq_work_has_interrupt(void)
{
return !!__smp_cross_call;
}
#endif /* __ASM_IRQ_WORK_H */

View File

@@ -48,6 +48,8 @@ extern void smp_init_cpus(void);
  */
 extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));

+extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+
 /*
  * Called from the secondary holding pen, this is the secondary CPU entry point.
  */

View File

@@ -470,7 +470,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}
 }

-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+void (*__smp_cross_call)(const struct cpumask *, unsigned int);

 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {

View File

@ -9,6 +9,7 @@ generic-y += exec.h
generic-y += futex.h
generic-y += hash.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h

View File

@ -15,6 +15,7 @@ generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h

View File

@ -22,6 +22,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h

View File

@ -8,6 +8,7 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += mcs_spinlock.h

View File

@ -3,6 +3,7 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h

View File

@ -23,6 +23,7 @@ generic-y += ioctls.h
generic-y += iomap.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h

View File

@ -2,6 +2,7 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += preempt.h

View File

@ -3,6 +3,7 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += module.h
generic-y += preempt.h

View File

@ -11,6 +11,7 @@ generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h

View File

@ -19,6 +19,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h

View File

@ -5,6 +5,7 @@ generic-y += cputime.h
generic-y += device.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h

View File

@ -3,6 +3,7 @@ generic-y += cputime.h
generic-y += current.h
generic-y += emergency-restart.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mutex.h

View File

@ -4,6 +4,7 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h

View File

@ -31,6 +31,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h

View File

@ -10,6 +10,7 @@ generic-y += exec.h
generic-y += hash.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kvm_para.h
generic-y += local.h

View File

@ -1,6 +1,7 @@
generic-y += clkdev.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h

View File

@ -2,6 +2,7 @@
generic-y += clkdev.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h

View File

@ -6,6 +6,7 @@ generic-y += barrier.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += hash.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += scatterlist.h

View File

@ -12,6 +12,7 @@ generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h

View File

@ -8,6 +8,7 @@ generic-y += emergency-restart.h
generic-y += exec.h
generic-y += hash.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h

View File

@ -17,6 +17,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h

View File

@ -14,6 +14,7 @@ generic-y += hash.h
generic-y += hw_irq.h
generic-y += io.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mutex.h

View File

@ -22,6 +22,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += local.h

View File

@ -0,0 +1,11 @@
#ifndef _ASM_IRQ_WORK_H
#define _ASM_IRQ_WORK_H
#include <asm/processor.h>
static inline bool arch_irq_work_has_interrupt(void)
{
return cpu_has_apic;
}
#endif /* _ASM_IRQ_WORK_H */

View File

@@ -41,7 +41,7 @@ __visible void smp_trace_irq_work_interrupt(struct pt_regs *regs)
 void arch_irq_work_raise(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
-	if (!cpu_has_apic)
+	if (!arch_irq_work_has_interrupt())
 		return;

 	apic->send_IPI_self(IRQ_WORK_VECTOR);

View File

@ -12,6 +12,7 @@ generic-y += hardirq.h
generic-y += hash.h
generic-y += ioctl.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h

View File

@ -0,0 +1,10 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
static inline bool arch_irq_work_has_interrupt(void)
{
return false;
}
#endif /* __ASM_IRQ_WORK_H */

View File

@@ -39,9 +39,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu);
 #endif

 void irq_work_run(void);
+void irq_work_tick(void);
 void irq_work_sync(struct irq_work *work);

 #ifdef CONFIG_IRQ_WORK
+#include <asm/irq_work.h>
+
 bool irq_work_needs_cpu(void);
 #else
 static inline bool irq_work_needs_cpu(void) { return false; }
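For reference, typical usage of this API is unchanged by the series: a caller initializes a struct irq_work with a callback and queues it from NMI or hard-irq context, and the callback later runs either from the dedicated irq_work interrupt or, on architectures without one, from the tick. A minimal sketch (the callback and work item names here are made up):

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Hypothetical example: defer a printk out of NMI context. */
static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("irq work ran\n");
}

static struct irq_work my_work;

static void my_setup(void)
{
	init_irq_work(&my_work, my_irq_work_fn);
}

/* Safe to call from NMI or hard irq context. */
static void my_nmi_handler(void)
{
	irq_work_queue(&my_work);	/* runs later, from the self-IPI or the tick */
}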

View File

@@ -181,14 +181,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
 	return cpumask_test_cpu(cpu, tick_nohz_full_mask);
 }

-extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
-static inline void tick_nohz_init(void) { }
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }

View File

@@ -577,7 +577,6 @@ asmlinkage __visible void __init start_kernel(void)
 	local_irq_disable();
 	idr_init_cache();
 	rcu_init();
-	tick_nohz_init();
 	context_tracking_init();
 	radix_tree_init();
 	/* init some links before init_ISA_irqs() */

View File

@@ -115,8 +115,10 @@ bool irq_work_needs_cpu(void)
 	raised = &__get_cpu_var(raised_list);
 	lazy = &__get_cpu_var(lazy_list);

-	if (llist_empty(raised) && llist_empty(lazy))
-		return false;
+	if (llist_empty(raised) || arch_irq_work_has_interrupt())
+		if (llist_empty(lazy))
+			return false;

 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -171,6 +173,15 @@ void irq_work_run(void)
 }
 EXPORT_SYMBOL_GPL(irq_work_run);

+void irq_work_tick(void)
+{
+	struct llist_head *raised = &__get_cpu_var(raised_list);
+
+	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
+		irq_work_run_list(raised);
+	irq_work_run_list(&__get_cpu_var(lazy_list));
+}
+
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
  * currently in use.

View File

@@ -400,4 +400,5 @@ void tick_resume(void)
 void __init tick_init(void)
 {
 	tick_broadcast_init();
+	tick_nohz_init();
 }

View File

@@ -99,6 +99,13 @@ static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */

+/* NO_HZ_FULL internal */
+#ifdef CONFIG_NO_HZ_FULL
+extern void tick_nohz_init(void);
+# else
+static inline void tick_nohz_init(void) { }
+#endif
+
 /*
  * Broadcasting support
  */

View File

@@ -295,22 +295,12 @@ out:
 /* Parse the boot-time nohz CPU list from the kernel parameters. */
 static int __init tick_nohz_full_setup(char *str)
 {
-	int cpu;
-
 	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
-	alloc_bootmem_cpumask_var(&housekeeping_mask);
 	if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
 		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
 		free_bootmem_cpumask_var(tick_nohz_full_mask);
 		return 1;
 	}
-	cpu = smp_processor_id();
-	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
-		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
-		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
-	}
-	cpumask_andnot(housekeeping_mask,
-		       cpu_possible_mask, tick_nohz_full_mask);
 	tick_nohz_full_running = true;

 	return 1;
@@ -349,18 +339,11 @@ static int tick_nohz_init_all(void)

 #ifdef CONFIG_NO_HZ_FULL_ALL
 	if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
-		pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
-		return err;
-	}
-	if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
-		pr_err("NO_HZ: Can't allocate not-full dynticks cpumask\n");
+		WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
 		return err;
 	}
 	err = 0;
 	cpumask_setall(tick_nohz_full_mask);
-	cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
-	cpumask_clear(housekeeping_mask);
-	cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
 	tick_nohz_full_running = true;
 #endif
 	return err;
@@ -375,6 +358,37 @@ void __init tick_nohz_init(void)
 		return;
 	}

+	if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
+		WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
+		cpumask_clear(tick_nohz_full_mask);
+		tick_nohz_full_running = false;
+		return;
+	}
+
+	/*
+	 * Full dynticks uses irq work to drive the tick rescheduling on safe
+	 * locking contexts. But then we need irq work to raise its own
+	 * interrupts to avoid circular dependency on the tick
+	 */
+	if (!arch_irq_work_has_interrupt()) {
+		pr_warning("NO_HZ: Can't run full dynticks because arch doesn't "
+			   "support irq work self-IPIs\n");
+		cpumask_clear(tick_nohz_full_mask);
+		cpumask_copy(housekeeping_mask, cpu_possible_mask);
+		tick_nohz_full_running = false;
+		return;
+	}
+
+	cpu = smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
+		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
+		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+	}
+
+	cpumask_andnot(housekeeping_mask,
+		       cpu_possible_mask, tick_nohz_full_mask);
+
 	for_each_cpu(cpu, tick_nohz_full_mask)
 		context_tracking_cpu_set(cpu);

View File

@@ -1385,7 +1385,7 @@ void update_process_times(int user_tick)
 	rcu_check_callbacks(cpu, user_tick);
 #ifdef CONFIG_IRQ_WORK
 	if (in_irq())
-		irq_work_run();
+		irq_work_tick();
 #endif
 	scheduler_tick();
 	run_posix_cpu_timers(p);