sparc: Use generic idle loop
Add generic cpu_idle support.

sparc32:
- replace call to cpu_idle() with cpu_startup_entry()
- add arch_cpu_idle()

sparc64:
- smp_callin() now includes the cpu_startup_entry() call, so we can skip
  calling cpu_idle from assembler
- add arch_cpu_idle() and arch_cpu_idle_dead()

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Reviewed-by: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Cc: torvalds@linux-foundation.org
Cc: rusty@rustcorp.com.au
Cc: paulmck@linux.vnet.ibm.com
Cc: peterz@infradead.org
Cc: magnus.damm@gmail.com
Acked-by: David Miller <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20130411193850.GA2330@merkur.ravnborg.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 87fa05aeb3
parent 781b0e870c
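The diff below wires sparc into the kernel's generic idle loop: core code now owns the need_resched()/hotplug bookkeeping and calls back into the architecture through arch_cpu_idle(), arch_cpu_idle_dead(), and cpu_startup_entry(). As a rough orientation before reading the hunks, here is a minimal sketch of that calling contract. It is an illustration only, not the actual kernel/cpu/idle.c implementation (which also handles nohz, RCU, and polling state), and the function name idle_loop_sketch() is invented for this example.

#include <linux/cpu.h>		/* arch_cpu_idle(), arch_cpu_idle_dead(), cpu_is_offline() */
#include <linux/sched.h>	/* need_resched(), schedule_preempt_disabled() */
#include <linux/smp.h>		/* smp_processor_id() */
#include <linux/irqflags.h>	/* local_irq_disable() */

/*
 * Simplified sketch of what the generic idle loop does on behalf of the
 * architecture.  A secondary CPU enters it via cpu_startup_entry(CPUHP_ONLINE)
 * and never returns.
 */
static void idle_loop_sketch(void)
{
	for (;;) {
		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* does not return */

			local_irq_disable();
			/*
			 * Arch hook: wait for work.  It is expected to
			 * re-enable interrupts before returning, which is why
			 * both new sparc implementations end with
			 * local_irq_enable().
			 */
			arch_cpu_idle();
		}
		schedule_preempt_disabled();
	}
}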
arch/sparc/Kconfig
@@ -37,6 +37,7 @@ config SPARC
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_IDLE_LOOP
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select MODULES_USE_ELF_RELA
arch/sparc/kernel/hvtramp.S
@@ -128,8 +128,7 @@ hv_cpu_startup:
 
 	call		smp_callin
 	 nop
-	call		cpu_idle
-	 mov		0, %o0
+
 	call		cpu_panic
 	 nop
arch/sparc/kernel/process_32.c
@@ -64,23 +64,12 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
 struct task_struct *last_task_used_math = NULL;
 struct thread_info *current_set[NR_CPUS];
 
-/*
- * the idle loop on a Sparc... ;)
- */
-void cpu_idle(void)
+/* Idle loop support. */
+void arch_cpu_idle(void)
 {
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	/* endless idle loop with no priority at all */
-	for (;;) {
-		while (!need_resched()) {
-			if (sparc_idle)
-				(*sparc_idle)();
-			else
-				cpu_relax();
-		}
-		schedule_preempt_disabled();
-	}
+	if (sparc_idle)
+		(*sparc_idle)();
+	local_irq_enable();
 }
 
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
arch/sparc/kernel/process_64.c
@@ -52,20 +52,17 @@
 
 #include "kstack.h"
 
-static void sparc64_yield(int cpu)
+/* Idle loop support on sparc64. */
+void arch_cpu_idle(void)
 {
 	if (tlb_type != hypervisor) {
 		touch_nmi_watchdog();
-		return;
-	}
-
-	clear_thread_flag(TIF_POLLING_NRFLAG);
-	smp_mb__after_clear_bit();
-
-	while (!need_resched() && !cpu_is_offline(cpu)) {
+	} else {
 		unsigned long pstate;
 
-		/* Disable interrupts. */
+		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
+		 * the cpu sleep hypervisor call.
+		 */
 		__asm__ __volatile__(
 			"rdpr %%pstate, %0\n\t"
 			"andn %0, %1, %0\n\t"
@@ -73,7 +70,7 @@ static void sparc64_yield(int cpu)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
 
-		if (!need_resched() && !cpu_is_offline(cpu))
+		if (!need_resched() && !cpu_is_offline(smp_processor_id()))
 			sun4v_cpu_yield();
 
 		/* Re-enable interrupts. */
@@ -84,36 +81,16 @@ static void sparc64_yield(int cpu)
 			: "=&r" (pstate)
 			: "i" (PSTATE_IE));
 	}
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
+	local_irq_enable();
 }
 
-/* The idle loop on sparc64. */
-void cpu_idle(void)
-{
-	int cpu = smp_processor_id();
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	while(1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-		while (!need_resched() && !cpu_is_offline(cpu))
-			sparc64_yield(cpu);
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-
 #ifdef CONFIG_HOTPLUG_CPU
-		if (cpu_is_offline(cpu)) {
-			sched_preempt_enable_no_resched();
-			cpu_play_dead();
-		}
-#endif
-		schedule_preempt_disabled();
-	}
+void arch_cpu_idle_dead()
+{
+	sched_preempt_enable_no_resched();
+	cpu_play_dead();
 }
+#endif
 
 #ifdef CONFIG_COMPAT
 static void show_regwindow32(struct pt_regs *regs)
arch/sparc/kernel/smp_32.c
@@ -369,7 +369,7 @@ void __cpuinit sparc_start_secondary(void *arg)
 	local_irq_enable();
 
 	wmb();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 
 	/* We should never reach here! */
 	BUG();
arch/sparc/kernel/smp_64.c
@@ -127,6 +127,8 @@ void __cpuinit smp_callin(void)
 
 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();
+
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void cpu_panic(void)
arch/sparc/kernel/trampoline_64.S
@@ -407,8 +407,7 @@ after_lock_tlb:
 
 	call		smp_callin
 	 nop
-	call		cpu_idle
-	 mov		0, %o0
+
 	call		cpu_panic
 	 nop
 1:	b,a,pt		%xcc, 1b