mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-27 22:53:55 +08:00
6727ad9e20
When doing an nmi backtrace of many cores, most of which are idle, the output is a little overwhelming and very uninformative. Suppress messages for cpus that are idling when they are interrupted and just emit one line, "NMI backtrace for N skipped: idling at pc 0xNNN". We do this by grouping all the cpuidle code together into a new .cpuidle.text section, and then checking the address of the interrupted PC to see if it lies within that section. This commit suitably tags x86 and tile idle routines, and only adds in the minimal framework for other architectures. Link: http://lkml.kernel.org/r/1472487169-14923-5-git-send-email-cmetcalf@mellanox.com Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Daniel Thompson <daniel.thompson@linaro.org> [arm] Tested-by: Petr Mladek <pmladek@suse.com> Cc: Aaron Tomlin <atomlin@redhat.com> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net> Cc: Russell King <linux@arm.linux.org.uk> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
65 lines
2.1 KiB
ArmAsm
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/linkage.h>
|
|
#include <linux/unistd.h>
|
|
#include <asm/irqflags.h>
|
|
#include <asm/processor.h>
|
|
#include <arch/abi.h>
|
|
#include <arch/spr_def.h>
|
|
|
|
#ifdef __tilegx__
|
|
#define bnzt bnezt
|
|
#endif
|
|
|
|
STD_ENTRY(current_text_addr)
|
|
{ move r0, lr; jrp lr }
|
|
STD_ENDPROC(current_text_addr)
|
|
|
|
STD_ENTRY(KBacktraceIterator_init_current)
|
|
{ move r2, lr; lnk r1 }
|
|
{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
|
|
{ move r3, sp; j _KBacktraceIterator_init_current }
|
|
jrp lr /* keep backtracer happy */
|
|
STD_ENDPROC(KBacktraceIterator_init_current)
|
|
|
|
/* Loop forever on a nap during SMP boot. */
|
|
STD_ENTRY(smp_nap)
|
|
nap
|
|
nop /* avoid provoking the icache prefetch with a jump */
|
|
j smp_nap /* we are not architecturally guaranteed not to exit nap */
|
|
jrp lr /* clue in the backtracer */
|
|
STD_ENDPROC(smp_nap)
|
|
|
|
/*
|
|
* Enable interrupts racelessly and then nap until interrupted.
|
|
* Architecturally, we are guaranteed that enabling interrupts via
|
|
* mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
|
|
* This function's _cpu_idle_nap address is special; see intvec.S.
|
|
* When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
|
|
* as a result return to the function that called _cpu_idle().
|
|
*/
|
|
STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
|
|
movei r1, 1
|
|
IRQ_ENABLE_LOAD(r2, r3)
|
|
mtspr INTERRUPT_CRITICAL_SECTION, r1
|
|
IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
|
|
mtspr INTERRUPT_CRITICAL_SECTION, zero
|
|
.global _cpu_idle_nap
|
|
_cpu_idle_nap:
|
|
nap
|
|
nop /* avoid provoking the icache prefetch with a jump */
|
|
jrp lr
|
|
STD_ENDPROC(_cpu_idle)
|