/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int spinning_secondaries;
u64 ppc64_pft_size;

struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
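
/*
 * Note: the 0x40 (64-byte) block sizes above are conservative defaults
 * so early cache-flush code has something sane to use; on this reading
 * of the boot flow, initialize_cache_info() below replaces them with
 * the real geometry from the device-tree (or hard-coded POWER8 values).
 */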

#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca[cpu].tcd_ptr = &paca[first].tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
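
/*
 * Illustration (not in the original source): with two threads per core,
 * cpu_first_thread_sibling(1) == 0, so paca[0].tcd_ptr and
 * paca[1].tcd_ptr both point at paca[0].tcd; the sibling threads' TLB
 * miss handlers then share (and serialise on) the lock at its start.
 */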

#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */
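
/*
 * Usage example (illustrative): booting with "smt-enabled=2" on an
 * 8-thread-per-core machine yields smt_enabled_at_boot = min(8, 2) = 2;
 * "smt-enabled=off" gives 0, and "smt-enabled=on" keeps all 8 threads.
 */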

/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we set up percpu data */
	get_paca()->data_offset = 0;
}

static void __init configure_exceptions(void)
{
	/*
	 * Set up the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}

static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
	    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
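
/*
 * Note (illustrative): with AIL=3 the CPU takes interrupts with
 * relocation already on, at the vector's offset from 0xc000000000000000,
 * avoiding a round trip through real mode on every exception.
 */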

/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */

void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, set up the paca. */
	setup_paca(&paca[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work; it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
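
/*
 * Worked example (illustrative) of the real-mode trick above: with the
 * top 2 bits of the effective address ignored, a kernel global at
 * 0xc000000000012345 resolves to physical 0x12345 while translation is
 * still off, which is why globals inside the RMO region remain usable.
 */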

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been set up), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
		return true;

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}

void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
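
/*
 * Note (illustrative): the PHYSICAL_START adjustment above converts the
 * virtual address of __secondary_hold_spinloop into the real address
 * the secondaries poll while their MMUs are still off; the mb() in the
 * wait loop is a full barrier, so each iteration re-reads
 * spinning_secondaries as the secondaries check in.
 */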

/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	info->blocks_per_page = PAGE_SIZE / bsize;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
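
/*
 * Worked example (illustrative): for the POWER8 L1d set up below,
 * size = 0x10000, lsize = 128 and sets = 64, so
 * assoc = 0x10000 / (64 * 128) = 8, i.e. 8-way set associative;
 * sets == 0 encodes fully associative as assoc = 0xffff.
 */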

static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}

void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize   blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	DBG(" <- initialize_cache_info()\n");
}

/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static __init u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
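
/*
 * Worked example (illustrative): with 256MB segments SID_SHIFT is 28,
 * giving a limit of 1UL << 28 = 256MB; with 1T segments (SID_SHIFT_1T
 * == 40) the bolted region extends to 1TB.
 */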

void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
					    THREAD_SIZE, limit));
	}
}

#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;
	unsigned long sp;

	for_each_possible_cpu(i) {
		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		critirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].crit_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);

		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
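
/*
 * Note (illustrative): each exception level above gets its own
 * THREAD_SIZE region; the thread_info sits at the bottom and the saved
 * kernel stack pointer starts at __va(sp + THREAD_SIZE), growing down.
 */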

/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for machine check exception handling. */
		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
		klp_init_thread_info(ti);
		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}

#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif
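
/*
 * Note (illustrative): mirroring __per_cpu_offset[cpu] into
 * paca[cpu].data_offset lets per-cpu accesses on the local CPU resolve
 * with a single load off r13 (the paca pointer) instead of indexing
 * the global offset array.
 */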

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}

/*
 * The hardlockup detector breaks PMU event based branches and is likely
 * to get false positives in KVM guests, so disable it by default.
 */
static int __init disable_hardlockup_detector(void)
{
	hardlockup_detector_disable();

	return 0;
}
early_initcall(disable_hardlockup_detector);
#endif
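
/*
 * Worked example (illustrative): on a 3.5GHz core with the default
 * watchdog_thresh of 10 seconds, the sample period comes out to
 * 3500000000 * 10 = 3.5e10 cycles before the watchdog NMI fires.
 */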