mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-30 08:04:13 +08:00
20e6926dcb
Tim found: WARNING: at arch/x86/kernel/smpboot.c:324 topology_sane.isra.2+0x6f/0x80() Hardware name: S2600CP sched: CPU #1's llc-sibling CPU #0 is not on the same node! [node: 1 != 0]. Ignoring dependency. smpboot: Booting Node 1, Processors #1 Modules linked in: Pid: 0, comm: swapper/1 Not tainted 3.9.0-0-generic #1 Call Trace: set_cpu_sibling_map+0x279/0x449 start_secondary+0x11d/0x1e5 Don Morris reproduced on a HP z620 workstation, and bisected it to commit e8d1955258
("acpi, memory-hotplug: parse SRAT before memblock is ready") It turns out movable_map has some problems, and it breaks several things 1. numa_init is called several times, NOT just for srat. so those nodes_clear(numa_nodes_parsed) memset(&numa_meminfo, 0, sizeof(numa_meminfo)) can not be just removed. Need to consider sequence is: numaq, srat, amd, dummy. and make fall back path working. 2. simply split acpi_numa_init to early_parse_srat. a. that early_parse_srat is NOT called for ia64, so you break ia64. b. for (i = 0; i < MAX_LOCAL_APIC; i++) set_apicid_to_node(i, NUMA_NO_NODE) still left in numa_init. So it will just clear result from early_parse_srat. it should be moved before that.... c. it breaks ACPI_TABLE_OVERIDE...as the acpi table scan is moved early before override from INITRD is settled. 3. that patch TITLE is total misleading, there is NO x86 in the title, but it changes critical x86 code. It caused x86 guys did not pay attention to find the problem early. Those patches really should be routed via tip/x86/mm. 4. after that commit, following range can not use movable ram: a. real_mode code.... well..funny, legacy Node0 [0,1M) could be hot-removed? b. initrd... it will be freed after booting, so it could be on movable... c. crashkernel for kdump...: looks like we can not put kdump kernel above 4G anymore. d. init_mem_mapping: can not put page table high anymore. e. initmem_init: vmemmap can not be high local node anymore. That is not good. If node is hotplugable, the mem related range like page table and vmemmap could be on the that node without problem and should be on that node. We have workaround patch that could fix some problems, but some can not be fixed. So just remove that offending commit and related ones including:f7210e6c4a
("mm/memblock.c: use CONFIG_HAVE_MEMBLOCK_NODE_MAP to protect movablecore_map in memblock_overlaps_region().")01a178a94e
("acpi, memory-hotplug: support getting hotplug info from SRAT")27168d38fa
("acpi, memory-hotplug: extend movablemem_map ranges to the end of node")e8d1955258
("acpi, memory-hotplug: parse SRAT before memblock is ready")fb06bc8e5f
("page_alloc: bootmem limit with movablecore_map")42f47e27e7
("page_alloc: make movablemem_map have higher priority")6981ec3114
("page_alloc: introduce zone_movable_limit[] to keep movable limit for nodes")34b71f1e04
("page_alloc: add movable_memmap kernel parameter")4d59a75125
("x86: get pg_data_t's memory from other node") Later we should have patches that will make sure kernel put page table and vmemmap on local node ram instead of push them down to node0. Also need to find way to put other kernel used ram to local node ram. Reported-by: Tim Gardner <tim.gardner@canonical.com> Reported-by: Don Morris <don.morris@hp.com> Bisected-by: Don Morris <don.morris@hp.com> Tested-by: Don Morris <don.morris@hp.com> Signed-off-by: Yinghai Lu <yinghai@kernel.org> Cc: Tony Luck <tony.luck@intel.com> Cc: Thomas Renninger <trenn@suse.de> Cc: Tejun Heo <tj@kernel.org> Cc: Tang Chen <tangchen@cn.fujitsu.com> Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
346 lines
8.6 KiB
C
346 lines
8.6 KiB
C
/*
|
|
* acpi_numa.c - ACPI NUMA support
|
|
*
|
|
* Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
|
|
*
|
|
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License as published by
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
* (at your option) any later version.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*
|
|
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
*
|
|
*/
|
|
#include <linux/module.h>
|
|
#include <linux/init.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/types.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/acpi.h>
|
|
#include <linux/numa.h>
|
|
#include <acpi/acpi_bus.h>
|
|
|
|
#define PREFIX "ACPI: "
|
|
|
|
#define ACPI_NUMA 0x80000000
|
|
#define _COMPONENT ACPI_NUMA
|
|
ACPI_MODULE_NAME("numa");
|
|
|
|
/* Bitmap of logical node IDs that have already been handed out by
 * acpi_map_pxm_to_node(); used to find the next free node number. */
static nodemask_t nodes_found_map = NODE_MASK_NONE;

/* maps to convert between proximity domain and logical node ID */
static int pxm_to_node_map[MAX_PXM_DOMAINS]
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
static int node_to_pxm_map[MAX_NUMNODES]
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };

/* SRAT table revision, cached by acpi_parse_srat(); rev 1 entries have a
 * different layout for some fields, so arch code consults this. */
unsigned char acpi_srat_revision __initdata;
|
int pxm_to_node(int pxm)
|
|
{
|
|
if (pxm < 0)
|
|
return NUMA_NO_NODE;
|
|
return pxm_to_node_map[pxm];
|
|
}
|
|
|
|
int node_to_pxm(int node)
|
|
{
|
|
if (node < 0)
|
|
return PXM_INVAL;
|
|
return node_to_pxm_map[node];
|
|
}
|
|
|
|
void __acpi_map_pxm_to_node(int pxm, int node)
|
|
{
|
|
if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
|
|
pxm_to_node_map[pxm] = node;
|
|
if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
|
|
node_to_pxm_map[node] = pxm;
|
|
}
|
|
|
|
int acpi_map_pxm_to_node(int pxm)
|
|
{
|
|
int node = pxm_to_node_map[pxm];
|
|
|
|
if (node < 0) {
|
|
if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
|
|
return NUMA_NO_NODE;
|
|
node = first_unset_node(nodes_found_map);
|
|
__acpi_map_pxm_to_node(pxm, node);
|
|
node_set(node, nodes_found_map);
|
|
}
|
|
|
|
return node;
|
|
}
|
|
|
|
/*
 * acpi_table_print_srat_entry - log one SRAT subtable for debugging.
 * Pure diagnostic output; the ACPI_DEBUG_PRINT bodies compile to nothing
 * unless ACPI_DEBUG_OUTPUT is defined.  Unknown entry types get a warning.
 */
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{

	ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");

	if (!header)
		return;

	switch (header->type) {

	case ACPI_SRAT_TYPE_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_cpu_affinity *p =
			    (struct acpi_srat_cpu_affinity *)header;
			/* rev-1 style local-APIC affinity entry */
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
					  p->apic_id, p->local_sapic_eid,
					  p->proximity_domain_lo,
					  (p->flags & ACPI_SRAT_CPU_ENABLED)?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_mem_affinity *p =
			    (struct acpi_srat_mem_affinity *)header;
			/* memory range affinity, including hotplug flags */
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
					  (unsigned long)p->base_address,
					  (unsigned long)p->length,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_MEM_ENABLED)?
					  "enabled" : "disabled",
					  (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
					  " hot-pluggable" : "",
					  (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)?
					  " non-volatile" : ""));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
#ifdef ACPI_DEBUG_OUTPUT
		{
			struct acpi_srat_x2apic_cpu_affinity *p =
			    (struct acpi_srat_x2apic_cpu_affinity *)header;
			/* x2APIC (32-bit apic id) affinity entry */
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "SRAT Processor (x2apicid[0x%08x]) in"
					  " proximity domain %d %s\n",
					  p->apic_id,
					  p->proximity_domain,
					  (p->flags & ACPI_SRAT_CPU_ENABLED) ?
					  "enabled" : "disabled"));
		}
#endif				/* ACPI_DEBUG_OUTPUT */
		break;
	default:
		printk(KERN_WARNING PREFIX
		       "Found unsupported SRAT entry (type = 0x%x)\n",
		       header->type);
		break;
	}
}
|
/*
|
|
* A lot of BIOS fill in 10 (= no distance) everywhere. This messes
|
|
* up the NUMA heuristics which wants the local node to have a smaller
|
|
* distance than the others.
|
|
* Do some quick checks here and only use the SLIT if it passes.
|
|
*/
|
|
static __init int slit_valid(struct acpi_table_slit *slit)
|
|
{
|
|
int i, j;
|
|
int d = slit->locality_count;
|
|
for (i = 0; i < d; i++) {
|
|
for (j = 0; j < d; j++) {
|
|
u8 val = slit->entry[d*i + j];
|
|
if (i == j) {
|
|
if (val != LOCAL_DISTANCE)
|
|
return 0;
|
|
} else if (val <= LOCAL_DISTANCE)
|
|
return 0;
|
|
}
|
|
}
|
|
return 1;
|
|
}
|
|
|
|
/*
 * acpi_parse_slit - handler for the SLIT table.
 * Hands a validated table to the arch via acpi_numa_slit_init(); a missing
 * or implausible table is rejected with -EINVAL.
 */
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
	struct acpi_table_slit *slit;

	if (!table)
		return -EINVAL;

	slit = (struct acpi_table_slit *)table;

	if (slit_valid(slit)) {
		acpi_numa_slit_init(slit);
		return 0;
	}

	printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
	return -EINVAL;
}
|
void __init __attribute__ ((weak))
|
|
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
|
|
{
|
|
printk(KERN_WARNING PREFIX
|
|
"Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
|
|
return;
|
|
}
|
|
|
|
|
|
static int __init
|
|
acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
|
|
const unsigned long end)
|
|
{
|
|
struct acpi_srat_x2apic_cpu_affinity *processor_affinity;
|
|
|
|
processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;
|
|
if (!processor_affinity)
|
|
return -EINVAL;
|
|
|
|
acpi_table_print_srat_entry(header);
|
|
|
|
/* let architecture-dependent part to do it */
|
|
acpi_numa_x2apic_affinity_init(processor_affinity);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int __init
|
|
acpi_parse_processor_affinity(struct acpi_subtable_header *header,
|
|
const unsigned long end)
|
|
{
|
|
struct acpi_srat_cpu_affinity *processor_affinity;
|
|
|
|
processor_affinity = (struct acpi_srat_cpu_affinity *)header;
|
|
if (!processor_affinity)
|
|
return -EINVAL;
|
|
|
|
acpi_table_print_srat_entry(header);
|
|
|
|
/* let architecture-dependent part to do it */
|
|
acpi_numa_processor_affinity_init(processor_affinity);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int __initdata parsed_numa_memblks;
|
|
|
|
static int __init
|
|
acpi_parse_memory_affinity(struct acpi_subtable_header * header,
|
|
const unsigned long end)
|
|
{
|
|
struct acpi_srat_mem_affinity *memory_affinity;
|
|
|
|
memory_affinity = (struct acpi_srat_mem_affinity *)header;
|
|
if (!memory_affinity)
|
|
return -EINVAL;
|
|
|
|
acpi_table_print_srat_entry(header);
|
|
|
|
/* let architecture-dependent part to do it */
|
|
if (!acpi_numa_memory_affinity_init(memory_affinity))
|
|
parsed_numa_memblks++;
|
|
return 0;
|
|
}
|
|
|
|
/*
 * acpi_parse_srat - table-level handler for SRAT.
 * Only caches the table revision here; the individual subtable entries are
 * walked later via acpi_table_parse_srat().
 */
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	if (!table)
		return -EINVAL;

	acpi_srat_revision =
		((struct acpi_table_srat *)table)->header.revision;

	return 0;
}
|
static int __init
|
|
acpi_table_parse_srat(enum acpi_srat_type id,
|
|
acpi_tbl_entry_handler handler, unsigned int max_entries)
|
|
{
|
|
return acpi_table_parse_entries(ACPI_SIG_SRAT,
|
|
sizeof(struct acpi_table_srat), id,
|
|
handler, max_entries);
|
|
}
|
|
|
|
/*
 * acpi_numa_init - parse SRAT and SLIT to seed the NUMA topology.
 * Returns 0 on success, a negative error from memory-affinity parsing, or
 * -ENOENT when SRAT contributed no usable memory ranges.
 */
int __init acpi_numa_init(void)
{
	int ret = 0;

	/*
	 * Should not limit number with cpu num that is from NR_CPUS or
	 * nr_cpus=.  SRAT cpu entries could have different order with that
	 * in MADT, so go over all cpu entries in SRAT to get the apicid to
	 * node mapping.
	 */

	/* SRAT: Static Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
				      acpi_parse_x2apic_affinity, 0);
		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
				      acpi_parse_processor_affinity, 0);
		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity,
					    NR_NODE_MEMBLKS);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	acpi_numa_arch_fixup();

	if (ret < 0)
		return ret;
	if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}
|
/*
 * acpi_get_pxm - find the proximity domain for an ACPI device.
 * Evaluates _PXM on @h and, if absent, walks up the namespace ancestry
 * until a _PXM is found.  Returns the pxm value, or -1 if no ancestor
 * provides one.
 */
int acpi_get_pxm(acpi_handle h)
{
	acpi_handle node = h;

	for (;;) {
		unsigned long long pxm;
		acpi_status status;
		acpi_handle parent;

		status = acpi_evaluate_integer(node, "_PXM", NULL, &pxm);
		if (ACPI_SUCCESS(status))
			return pxm;

		status = acpi_get_parent(node, &parent);
		if (ACPI_FAILURE(status))
			return -1;
		node = parent;
	}
}
|
/*
 * acpi_get_node - map an ACPI device handle to a logical NUMA node.
 * Looks up the device's proximity domain and converts it to a node ID,
 * returning -1 when no valid pxm can be determined.
 */
int acpi_get_node(acpi_handle *handle)
{
	int pxm = acpi_get_pxm(handle);

	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
		return -1;

	return acpi_map_pxm_to_node(pxm);
}
EXPORT_SYMBOL(acpi_get_node);