// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
#include <asm/kasan.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[4];
	u32 abt[4];
	u32 und[4];
	u32 fiq[4];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
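
/*
 * How the ENDIANNESS probe works: the union overlays the bytes
 * { 'l', '?', '?', 'b' } on an unsigned long.  Casting the long to
 * char yields its least-significant byte, which is the first array
 * element ('l') on a little-endian CPU and the last ('b') on a
 * big-endian one.  The character is appended to the utsname machine
 * and ELF platform strings in setup_processor(), e.g. "armv7l" vs
 * "armv7b".
 */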

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
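
/*
 * The three "reserved" I/O ranges below are the classic PC parallel
 * port bases (LPT at 0x3bc, 0x378 and 0x278); machines that may carry
 * such a port claim them via mdesc->reserve_lp0..2 in
 * request_standard_resources() further down.
 */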
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

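/*
 * A virtually-indexed I-cache can alias when the bits used to select a
 * set extend above the page offset, i.e. when (line size * number of
 * sets) exceeds PAGE_SIZE.  For example, a 16 KB way built from
 * 32-byte lines and 512 sets needs index bits [13:5], but a 4 KB page
 * only fixes bits [11:0], so two virtual mappings of the same physical
 * page can land in different sets.  That is exactly the check applied
 * to the CCSIDR geometry below.
 */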
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

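/*
 * CONFIG_ARM_PATCH_IDIV: when the CPU advertises hardware integer
 * divide (HWCAP_IDIVA/HWCAP_IDIVT), the out-of-line library routines
 * __aeabi_uidiv/__aeabi_idiv are overwritten at boot with a two-word
 * sequence -- the udiv/sdiv instruction followed by a return -- so the
 * common division path costs two instructions instead of a call into
 * the software divide loop.
 */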
#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
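	/*
	 * Launder fn_addr through an empty asm so the compiler can no
	 * longer tell it was derived from a function symbol; otherwise
	 * it could assume the code behind the pointer never changes
	 * and mis-optimise the stores below.
	 */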
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

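/*
 * Note on the feature tests below: cpuid_feature_extract() and
 * cpuid_feature_extract_field() return a sign-extended 4-bit field
 * from a CPUID extension register, so the architectural "not
 * implemented" value 0xf comes back as -1 and is safely rejected by
 * the ">= n" comparisons.
 */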
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;
	u32 isar6;
	u32 pfr2;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;

	/* Check for Speculation barrier instruction */
	isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
	block = cpuid_feature_extract_field(isar6, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SB;

	/* Check for Speculative Store Bypassing control */
	pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
	block = cpuid_feature_extract_field(pfr2, 4);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SSBS;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l "l"
#define PLC_r "r"
#else
#define PLC_l "I"
#define PLC_r "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
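	/*
	 * The asm below switches the CPU through IRQ, ABT, UND and FIQ
	 * modes in turn (with IRQs and FIQs masked), loads each mode's
	 * banked sp with the matching slot of this CPU's struct stack,
	 * and finally drops back to SVC mode.  Operands %0..%9 are the
	 * stack base, the four mode CPSR values and the four offsetof()
	 * constants listed in the operand block.
	 */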
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

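/*
 * smp_setup_processor_id() below runs on the boot CPU and remaps the
 * logical map so that whichever hardware CPU we booted on becomes
 * logical CPU 0; e.g. booting on the CPU with MPIDR aff0 == 2 swaps
 * logical IDs 0 and 2 and leaves the rest identity-mapped.
 */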
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
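/*
 * Worked example (hypothetical layout): a two-cluster system with two
 * CPUs per cluster and MPIDRs 0x000, 0x001, 0x100 and 0x101.  XORing
 * each against CPU0 gives mask = 0x101, so one bit is significant at
 * aff0 and one at aff1: fs = {0, 0, 0}, bits = {1, 1, 0}.  The
 * resulting shifts (0, 7, 14) pack those two bits together, hashing
 * the four MPIDRs to the dense indices 0..3.
 */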
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

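/*
 * Example of the rounding below: arm_add_memory(0x80100400, 0x200000)
 * bumps the start up to the 0x80101000 page boundary, shrinks the size
 * by the 0xc00 bytes skipped, and then masks the size down to whole
 * pages before handing the region to memblock.
 */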
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

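/*
 * For example "mem=64M@0x20000000" registers 64 MiB of RAM at physical
 * address 0x20000000, and a bare "mem=64M" starts at PHYS_OFFSET.  The
 * first mem= option also discards the firmware/DT-provided layout (see
 * the memblock_remove() call below).
 */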
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte
		 * in the range.
		 */
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE)
struct screen_info vgacon_screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

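/*
 * Usage example for the code below: booting with "crashkernel=64M"
 * lets the kernel pick a 128 MB-aligned spot in lowmem, while
 * "crashkernel=64M@0x30000000" asks for the 64 MB reservation at that
 * exact physical address and fails if the range is already in use.
 */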
2024-01-24 13:12:53 +08:00
|
|
|
#ifdef CONFIG_CRASH_RESERVE
|
2016-03-15 03:34:37 +08:00
|
|
|
/*
|
|
|
|
* The crash region must be aligned to 128MB to avoid
|
|
|
|
* zImage relocating below the reserved region.
|
|
|
|
*/
|
|
|
|
#define CRASH_ALIGN (128 << 20)
|
|
|
|
|
2010-05-10 16:20:22 +08:00
|
|
|
static inline unsigned long long get_total_mem(void)
|
|
|
|
{
|
|
|
|
unsigned long total;
|
|
|
|
|
|
|
|
total = max_low_pfn - min_low_pfn;
|
|
|
|
return total << PAGE_SHIFT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* reserve_crashkernel() - reserves memory are for crash kernel
|
|
|
|
*
|
|
|
|
* This function reserves memory area given in "crashkernel=" kernel command
|
|
|
|
* line parameter. The memory reserved is used by a dump capture kernel when
|
|
|
|
* primary kernel is crashing.
|
|
|
|
*/
|
|
|
|
static void __init reserve_crashkernel(void)
|
|
|
|
{
|
|
|
|
unsigned long long crash_size, crash_base;
|
|
|
|
unsigned long long total_mem;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
total_mem = get_total_mem();
|
|
|
|
ret = parse_crashkernel(boot_command_line, total_mem,
|
2023-09-14 11:31:35 +08:00
|
|
|
&crash_size, &crash_base,
|
|
|
|
NULL, NULL);
|
2022-04-01 16:30:51 +08:00
|
|
|
/* invalid value specified or crashkernel=0 */
|
|
|
|
if (ret || !crash_size)
|
2010-05-10 16:20:22 +08:00
|
|
|
return;
|
|
|
|
|
2016-03-15 03:34:37 +08:00
|
|
|
if (crash_base <= 0) {
|
2016-04-01 21:47:36 +08:00
|
|
|
unsigned long long crash_max = idmap_to_phys((u32)~0);
|
2017-07-20 06:01:38 +08:00
|
|
|
unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
|
|
|
|
if (crash_max > lowmem_max)
|
|
|
|
crash_max = lowmem_max;
|
2021-09-03 06:00:26 +08:00
|
|
|
|
|
|
|
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
|
|
|
|
CRASH_ALIGN, crash_max);
|
2016-03-15 03:34:37 +08:00
|
|
|
if (!crash_base) {
|
|
|
|
pr_err("crashkernel reservation failed - No suitable area found.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
} else {
|
2021-09-03 06:00:26 +08:00
|
|
|
unsigned long long crash_max = crash_base + crash_size;
|
2016-03-15 03:34:37 +08:00
|
|
|
unsigned long long start;
|
|
|
|
|
2021-09-03 06:00:26 +08:00
|
|
|
start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
|
|
|
|
crash_base, crash_max);
|
|
|
|
if (!start) {
|
2016-03-15 03:34:37 +08:00
|
|
|
pr_err("crashkernel reservation failed - memory is in use.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-06 01:29:35 +08:00
|
|
|
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
|
|
|
|
(unsigned long)(crash_size >> 20),
|
|
|
|
(unsigned long)(crash_base >> 20),
|
|
|
|
(unsigned long)(total_mem >> 20));
|
2010-05-10 16:20:22 +08:00
|
|
|
|
2016-08-03 05:05:48 +08:00
|
|
|
/* The crashk resource must always be located in normal mem */
|
2010-05-10 16:20:22 +08:00
|
|
|
crashk_res.start = crash_base;
|
|
|
|
crashk_res.end = crash_base + crash_size - 1;
|
|
|
|
insert_resource(&iomem_resource, &crashk_res);
|
2016-08-03 05:05:48 +08:00
|
|
|
|
	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_CRASH_RESERVE */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};
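
/*
 * Note: this bridges the legacy machine_desc->restart() hook into the
 * generic restart notifier chain; setup_arch() stashes the hook in
 * __arm_pm_restart and registers arm_restart_nb. A priority of 128 is
 * the conventional default for restart handlers.
 */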

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
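
	/*
	 * __atags_pointer is the physical address of the DT/ATAGS blob
	 * handed over in r2. The DT gets its own fixed read-only mapping
	 * near the top of the address space, independent of the linear
	 * map, and FDT_VIRT_BASE() translates the physical address into
	 * that mapping.
	 */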

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

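	/*
	 * Note: dump_machine_table() lists the supported machine IDs and
	 * then spins forever, so an unrecognised machine never gets past
	 * the error branch above.
	 */
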
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	arm_efi_init();

	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif
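
	/*
	 * SMP ops selection: a platform smp_init() hook wins if it returns
	 * true; otherwise PSCI ops are preferred over machine_desc ops.
	 * smp_build_mpidr_hash() then derives a collision-free hash of the
	 * MPIDR affinity fields so early resume code can map an MPIDR to a
	 * logical CPU index without a full table walk.
	 */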

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	vgacon_register_screen(&vgacon_screen_info);
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

bool arch_cpu_is_hotpluggable(int num)
{
	return platform_can_hotplug_cpu(num);
}

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
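
/*
 * The /proc/cpu directory created above is populated elsewhere; for
 * example, the alignment fault handler is expected to expose
 * /proc/cpu/alignment when CONFIG_ALIGNMENT_TRAP is enabled.
 */
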
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	"fphp",
	"asimdhp",
	"asimddp",
	"asimdfhm",
	"asimdbf16",
	"i8mm",
	NULL
};
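
/*
 * The position of each name in hwcap_str[] is its HWCAP bit number:
 * c_show() below prints entry j when bit j is set in elf_hwcap, so the
 * order of this table must not change.
 */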

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"sb",
	"ssbs",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
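
		/*
		 * BogoMIPS is loops_per_jiffy * HZ / 500000; the two
		 * arguments above print its integer part and its first
		 * two decimal digits using integer division.
		 */
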
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}
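
	/*
	 * The decode above covers the three historical MIDR layouts:
	 * pre-ARM7 parts keep everything in one part field, ARM7 parts
	 * use a 7-bit variant at bits [22:16], and later parts use the
	 * modern implementer/variant/part/revision split.
	 */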

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
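
/*
 * cpuinfo_op iterates exactly once: c_start() returns a single dummy
 * token for position 0 and NULL afterwards, so c_show() runs one time
 * and walks every online CPU itself.
 */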