Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-28 13:34:38 +08:00)
5f40b90972
When booting a secondary CPU, the primary CPU hands two sets of page
tables via the secondary_data struct:

	(1) swapper_pg_dir: a normal, cacheable, shared (if SMP) mapping
	    of the kernel image (i.e. the tables used by init_mm).

	(2) idmap_pgd: an uncached mapping of the .idmap.text ELF section.

The idmap is generally used when enabling and disabling the MMU, which
includes early CPU boot. In this case, the secondary CPU switches to
swapper as soon as it enters C code:

	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);

This causes a problem on ARMv7, where the identity mapping is treated as
strongly-ordered leading to architecturally UNPREDICTABLE behaviour of
exclusive accesses, such as those used by atomic_inc.

This patch re-orders the secondary_start_kernel function so that we
switch to swapper before performing any exclusive accesses.

Cc: <stable@vger.kernel.org>
Cc: David McKay <david.mckay@st.com>
Reported-by: Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
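For reference, the reordering described above is visible in secondary_start_kernel()
in the file below. The following is only a condensed, illustrative sketch of the fixed
ordering (all identifiers are taken from the file itself; the bring-up steps after the
mm switch are elided here and should not be read as the complete function):

	/*
	 * Illustrative sketch only - see secondary_start_kernel() in the
	 * file below for the actual code; later bring-up steps are elided.
	 */
	asmlinkage void __cpuinit secondary_start_kernel(void)
	{
		struct mm_struct *mm = &init_mm;
		unsigned int cpu;

		/*
		 * Switch away from the strongly-ordered identity mapping
		 * before performing any exclusive accesses.
		 */
		cpu_switch_mm(mm->pgd, mm);
		enter_lazy_tlb(mm, current);
		local_flush_tlb_all();

		/* Exclusive accesses (e.g. atomic_inc) are now safe. */
		cpu = smp_processor_id();
		atomic_inc(&mm->mm_count);
		current->active_mm = mm;
		cpumask_set_cpu(cpu, mm_cpumask(mm));

		/* ... remainder of secondary CPU bring-up ... */
	}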
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int __cpuinitdata pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	if (smp_ops.smp_prepare_cpus)
		smp_ops.smp_prepare_cpus(max_cpus);
}

static void __cpuinit platform_secondary_init(unsigned int cpu)
{
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static void platform_cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpuinit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpuinit __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	RCU_NONIDLE(complete(&cpu_died));

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	printk("CPU%u: Booted secondary processor\n", cpu);

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr < NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
					per_cpu(l_p_j_ref_freq, cpu),
					freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif