linux-next/arch/arm/mach-spear/platsmp.c
Paul Gortmaker 8bd26e3a7e arm: delete __cpuinit/__CPUINIT usage from all ARM users
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications.  For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
shows the nasty type of bugs that can be created with improper use
of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out.  Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit  -- so if we remove the __cpuinit from
the arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
related content into no-ops as early as possible, since that will get
rid of these warnings.  In any case, they are temporary and harmless.

This removes all the ARM uses of the __cpuinit macros from C code,
and all __CPUINIT from assembly code.  The ARM code also had two
".previous" section statements that were paired off against __CPUINIT
(aka .section ".cpuinit.text"); those are removed here as well.
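
In this file, for instance, the change amounts to dropping the
annotation from the two secondary-CPU hooks.  A sketch of the
before/after (the pre-patch prototypes are reconstructed from the
description above, not quoted verbatim):

	/* before: functions placed in the throwaway .cpuinit.text section */
	static void __cpuinit spear13xx_secondary_init(unsigned int cpu);
	static int __cpuinit spear13xx_boot_secondary(unsigned int cpu,
						      struct task_struct *idle);

	/* after: plain functions, kept in regular .text */
	static void spear13xx_secondary_init(unsigned int cpu);
	static int spear13xx_boot_secondary(unsigned int cpu,
					    struct task_struct *idle);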

[1] https://lkml.org/lkml/2013/5/20/589

Cc: Russell King <linux@arm.linux.org.uk>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-07-14 19:36:52 -04:00

/*
 * arch/arm/mach-spear13xx/platsmp.c
 *
 * based upon linux/arch/arm/mach-realview/platsmp.c
 *
 * Copyright (C) 2012 ST Microelectronics Ltd.
 * Shiraz Hashim <shiraz.hashim@st.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/smp_scu.h>
#include <mach/spear.h>
#include "generic.h"

static DEFINE_SPINLOCK(boot_lock);

static void __iomem *scu_base = IOMEM(VA_SCU_BASE);

static void spear13xx_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	pen_release = -1;
	smp_wmb();

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu;
	flush_cache_all();
	outer_flush_all();

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
static void __init spear13xx_smp_init_cpus(void)
{
	unsigned int i, ncores = scu_get_core_count(scu_base);

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init spear13xx_smp_prepare_cpus(unsigned int max_cpus)
{
	scu_enable(scu_base);

	/*
	 * Write the address of secondary startup into the system-wide location
	 * (presently it is in SRAM). The BootMonitor waits until it receives a
	 * soft interrupt, and then the secondary CPU branches to this address.
	 */
	__raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
}

struct smp_operations spear13xx_smp_ops __initdata = {
	.smp_init_cpus		= spear13xx_smp_init_cpus,
	.smp_prepare_cpus	= spear13xx_smp_prepare_cpus,
	.smp_secondary_init	= spear13xx_secondary_init,
	.smp_boot_secondary	= spear13xx_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= spear13xx_cpu_die,
#endif
};
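
For reference, these ops are not called directly from this file; the
ARM SMP core invokes them after the platform's machine descriptor
registers them via the smp_ops() helper.  A minimal sketch of that
wiring, assuming the descriptor lives in arch/arm/mach-spear/spear13xx.c
and that its name and other fields match this tree:

	/* sketch (not part of this file): registering the ops defined above */
	DT_MACHINE_START(SPEAR13XX_DT, "ST SPEAr13xx SoC with Flattened Device Tree")
		.smp		= smp_ops(spear13xx_smp_ops),
		/* other fields (map_io, dt_compat, ...) omitted in this sketch */
	MACHINE_END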