x86/mm: Stop calling leave_mm() in idle code
Now that lazy TLB suppresses all flush IPIs (as opposed to all but the
first), there's no need to leave_mm() when going idle.

This means we can get rid of the rcuidle hack in switch_mm_irqs_off()
and we can unexport leave_mm().

This also removes acpi_unlazy_tlb() from the x86 and ia64 headers,
since it has no callers any more.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Nadav Amit <nadav.amit@gmail.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/03c699cfd6021e467be650d6b73deaccfe4b4bd7.1498751203.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 94b1b03b51
commit 43858b4f25
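The premise comes from the parent commit's lazy TLB rework: a CPU that
has gone lazy is skipped by TLB shootdown IPIs altogether, so idle code
gains nothing by switching to init_mm first. A minimal sketch of that
idea, assuming hypothetical helpers cpu_is_lazy_tlb() and
send_flush_ipi() (this is illustrative only, not the kernel's actual
flush path):

/* Illustrative sketch only -- not the real arch/x86/mm/tlb.c code. */
static void flush_tlb_others_sketch(const struct cpumask *cpus)
{
	int cpu;

	for_each_cpu(cpu, cpus) {
		/* Hypothetical predicate: "CPU is in lazy TLB mode". */
		if (cpu_is_lazy_tlb(cpu))
			continue;	/* all IPIs suppressed, not just the first */
		send_flush_ipi(cpu);	/* hypothetical helper */
	}
}

Previously only the IPIs after the first were suppressed: the first
flush made the lazy CPU leave_mm(), and idle code called leave_mm() up
front to avoid even that one wakeup.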
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -112,8 +112,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }
 
-#define acpi_unlazy_tlb(x)
-
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)	\
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -150,8 +150,6 @@ static inline void disable_acpi(void) { }
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
-#define acpi_unlazy_tlb(x)	leave_mm(x)
-
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
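For reference, the hook deleted by the two header hunks above looked
like this: a no-op on ia64 and an alias for leave_mm() on x86
(reconstructed directly from the removed lines):

/* arch/ia64/include/asm/acpi.h -- a no-op on ia64 */
#define acpi_unlazy_tlb(x)

/* arch/x86/include/asm/acpi.h -- drop the lazy mm on this CPU */
#define acpi_unlazy_tlb(x)	leave_mm(x)

Its only remaining caller, acpi_idle_enter_bm(), is removed in the
drivers/acpi/processor_idle.c hunk below.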
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -50,7 +50,6 @@ void leave_mm(int cpu)
 
 	switch_mm(NULL, &init_mm, NULL);
 }
-EXPORT_SYMBOL_GPL(leave_mm);
 
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
@@ -117,15 +116,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen,
 			       next_tlb_gen);
 		write_cr3(__pa(next->pgd));
-
-		/*
-		 * This gets called via leave_mm() in the idle path
-		 * where RCU functions differently.  Tracing normally
-		 * uses RCU, so we have to call the tracepoint
-		 * specially here.
-		 */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH,
-					TLB_FLUSH_ALL);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
+				TLB_FLUSH_ALL);
 	}
 
 	/*
@@ -167,13 +159,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
 		this_cpu_write(cpu_tlbstate.loaded_mm, next);
 		write_cr3(__pa(next->pgd));
-		/*
-		 * This gets called via leave_mm() in the idle path where RCU
-		 * functions differently.  Tracing normally uses RCU, so we
-		 * have to call the tracepoint specially here.
-		 */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH,
-					TLB_FLUSH_ALL);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	}
 
 	load_mm_cr4(next);
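The two switch_mm_irqs_off() hunks above delete what the changelog
calls the rcuidle hack. Tracepoints depend on RCU, and RCU is not
watching a CPU that sits in the idle loop, so code reachable from idle
had to use the trace_<event>_rcuidle() variants. Roughly, and as a
sketch of the generated wrapper rather than a copy of the tracepoint
machinery of this era:

/* Sketch of what trace_tlb_flush_rcuidle() effectively expanded to. */
static inline void trace_tlb_flush_rcuidle_sketch(int reason,
						  unsigned long pages)
{
	rcu_irq_enter_irqson();		/* make RCU watch this CPU again */
	trace_tlb_flush(reason, pages);	/* the ordinary tracepoint */
	rcu_irq_exit_irqson();
}

Since leave_mm() is no longer reachable from the idle path,
switch_mm_irqs_off() always runs with RCU watching and the plain,
cheaper trace_tlb_flush() suffices.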
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -708,8 +708,6 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
 			       struct acpi_processor_cx *cx, bool timer_bc)
 {
-	acpi_unlazy_tlb(smp_processor_id());
-
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -912,16 +912,15 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
 	struct cpuidle_state *state = &drv->states[index];
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned int cstate;
-	int cpu = smp_processor_id();
 
 	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
 	/*
-	 * leave_mm() to avoid costly and often unnecessary wakeups
-	 * for flushing the user TLB's associated with the active mm.
+	 * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
+	 * will probably flush the TLB.  It's not guaranteed to flush
+	 * the TLB, though, so it's not clear that we can do anything
+	 * useful with this knowledge.
 	 */
-	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
-		leave_mm(cpu);
 
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		tick_broadcast_enter();
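The deleted intel_idle lines were the last idle-path user of
leave_mm(): before entering a C-state deep enough that the hardware
would likely lose TLB contents anyway (CPUIDLE_FLAG_TLB_FLUSHED), they
dropped the lazy mm so that other CPUs would stop sending this one
flush wakeups. With lazy CPUs now skipped by shootdowns entirely, the
call only added an mm switch on idle entry, and the replacement
comment notes that the flag does not even guarantee a flush, so
nothing useful can be keyed off it.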