linux/arch/mips/loongson64/numa.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
 *                    Institute of Computing Technology
 * Author:  Xiang Gao, gaoxiang@ict.ac.cn
 *          Huacai Chen, chenhc@lemote.com
 *          Xiaofu Meng, Shuangshuang Zhang
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
#include <asm/mc146818-time.h>
#include <asm/time.h>
#include <asm/wbflush.h>
#include <boot_param.h>
#include <loongson.h>
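
/*
 * Per-node state shared with the rest of the kernel: the node distance
 * matrix, the pg_data_t pointer for each node and the mask of CPUs that
 * belong to each node.
 */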
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);

struct pglist_data *__node_data[MAX_NUMNODES];
EXPORT_SYMBOL(__node_data);

cpumask_t __node_cpumask[MAX_NUMNODES];
EXPORT_SYMBOL(__node_cpumask);
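
/*
 * Mark one possible/online node for each node reported by the firmware
 * in loongson_sysconf.nr_nodes; nodes therefore come up with the IDs
 * 0..nr_nodes-1.
 */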
static void cpu_node_probe(void)
{
	int i;

	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	for (i = 0; i < loongson_sysconf.nr_nodes; i++) {
		node_set_state(num_online_nodes(), N_POSSIBLE);
		node_set_online(num_online_nodes());
	}

	pr_info("NUMA: Discovered %d cpus on %d nodes\n",
		loongson_sysconf.nr_cpus, num_online_nodes());
}
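
/*
 * Two-level distance model: a node is LOCAL_DISTANCE from itself, 40
 * from other nodes in the same physical package and 100 from nodes in
 * a different package.
 */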
static int __init compute_node_distance(int row, int col)
{
	int package_row = row * loongson_sysconf.cores_per_node /
		loongson_sysconf.cores_per_package;
	int package_col = col * loongson_sysconf.cores_per_node /
		loongson_sysconf.cores_per_package;

	if (col == row)
		return LOCAL_DISTANCE;
	else if (package_row == package_col)
		return 40;
	else
		return 100;
}
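
/*
 * Build __node_distances: every pair starts out as -1 (unknown), then
 * each pair of online nodes gets its distance from compute_node_distance().
 */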
static void __init init_topology_matrix(void)
{
	int row, col;

	for (row = 0; row < MAX_NUMNODES; row++)
		for (col = 0; col < MAX_NUMNODES; col++)
			__node_distances[row][col] = -1;

	for_each_online_node(row) {
		for_each_online_node(col) {
			__node_distances[row][col] =
				compute_node_distance(row, col);
		}
	}
}
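
/*
 * Per-node memory setup: look up the node's PFN range, allocate its
 * pg_data_t from node-local memory and, on node 0, also reserve the
 * kernel image, the memory below node 0's start_pfn and (when node 0
 * extends that high) the 32 MiB window used by the RS780E integrated GPU.
 */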
static void __init node_mem_init(unsigned int node)
{
	struct pglist_data *nd;
	unsigned long node_addrspace_offset;
	unsigned long start_pfn, end_pfn;
	unsigned long nd_pa;
	int tnid;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);

	node_addrspace_offset = nid_to_addrbase(node);
	pr_info("Node%d's addrspace_offset is 0x%lx\n",
		node, node_addrspace_offset);

	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
	pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
		node, start_pfn, end_pfn);

	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, node);
	nd = __va(nd_pa);
	memset(nd, 0, sizeof(struct pglist_data));
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != node)
		pr_info("NODE_DATA(%d) on node %d\n", node, tnid);
	__node_data[node] = nd;
	NODE_DATA(node)->node_start_pfn = start_pfn;
	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;

	if (node == 0) {
		/* kernel start address */
		unsigned long kernel_start_pfn = PFN_DOWN(__pa_symbol(&_text));

		/* kernel end address */
		unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));

		/* used by finalize_initrd() */
		max_low_pfn = end_pfn;

		/* Reserve the kernel text/data/bss */
		memblock_reserve(kernel_start_pfn << PAGE_SHIFT,
				 ((kernel_end_pfn - kernel_start_pfn) << PAGE_SHIFT));

		/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
		if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
			memblock_reserve((node_addrspace_offset | 0xfe000000),
					 32 << 20);

		/* Reserve pfn range 0~node[0]->node_start_pfn */
		memblock_reserve(0, PAGE_SIZE * start_pfn);
	}
}
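
/*
 * Top-level NUMA discovery: probe nodes, build the distance matrix,
 * initialise memory on every online node and assign each non-reserved
 * CPU to the cpumask of the node it lives on.
 */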
static __init void prom_meminit(void)
{
	unsigned int node, cpu, active_cpu = 0;

	cpu_node_probe();
	init_topology_matrix();

	for (node = 0; node < loongson_sysconf.nr_nodes; node++) {
		if (node_online(node)) {
			szmem(node);
			node_mem_init(node);
			cpumask_clear(&__node_cpumask[node]);
		}
	}
	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());

	for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
		node = cpu / loongson_sysconf.cores_per_node;
		if (node >= num_online_nodes())
			node = 0;

		if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
			continue;

		cpumask_set_cpu(active_cpu, &__node_cpumask[node]);
		pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);

		active_cpu++;
	}
}
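
/*
 * Zone limits handed to free_area_init(): PFNs below MAX_DMA32_PFN go
 * to ZONE_DMA32, everything else up to max_low_pfn to ZONE_NORMAL.
 */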
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	pagetable_init();
	zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
	zones_size[ZONE_NORMAL] = max_low_pfn;
	free_area_init(zones_size);
}
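
/*
 * Release all memblock-managed memory to the buddy allocator and set
 * up the shared zero pages.
 */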
void __init mem_init(void)
{
	high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
	memblock_free_all();
	setup_zero_pages();	/* This comes from node 0 */
}

/* All PCI devices belong to logical Node-0 */
int pcibus_to_node(struct pci_bus *bus)
{
	return 0;
}
EXPORT_SYMBOL(pcibus_to_node);
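
/*
 * Report the CP0 Config3/PageGrain state and kick off NUMA memory
 * discovery via prom_meminit().
 */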
void __init prom_init_numa_memory(void)
{
	pr_info("CP0_Config3: CP0 16.3 (0x%x)\n", read_c0_config3());
	pr_info("CP0_PageGrain: CP0 5.1 (0x%x)\n", read_c0_pagegrain());
	prom_meminit();
}
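
/*
 * Hooks used when node data has to be (re)allocated after boot (memory
 * hot-add): hand out a fresh pg_data_t and record the pgdat chosen by
 * the core MM in __node_data so NODE_DATA() sees it.
 */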
pg_data_t * __init arch_alloc_nodedata(int nid)
{
	return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
}

void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	__node_data[nid] = pgdat;
}