ia64: remove custom __early_pfn_to_nid()
The ia64 implementation of __early_pfn_to_nid() essentially relies on the
same data as the generic implementation.

The correspondence between memory ranges and nodes is set in memblock
during early memory initialization in the register_active_ranges()
function. The initialization of sparsemem that requires
early_pfn_to_nid() happens later and can use the memblock information
like the other architectures.

Link: https://lkml.kernel.org/r/20201101170454.9567-3-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Meelis Roos <mroos@linux.ee>
Cc: Michael Schmitz <schmitzmic@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 03e92a5e09
parent 36d40290c8
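For context, the generic lookup that ia64 now falls back to is driven by the
memblock ranges registered at boot. The sketch below paraphrases the
mm/page_alloc.c helper of this era and is illustrative rather than verbatim:
it first consults a one-entry cache, then binary-searches the memblock
regions via memblock_search_pfn_nid().

#include <linux/init.h>
#include <linux/memblock.h>

/*
 * Illustrative sketch of the generic helper; the authoritative version
 * lives in mm/page_alloc.c and searches the ranges that the early boot
 * code recorded in memblock.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	/* Fast path: the previous lookup's range already covers this pfn. */
	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	/* Slow path: binary-search the memblock regions for the pfn. */
	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}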
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -342,9 +342,6 @@ config HOLES_IN_ZONE
 	bool
 	default y if VIRTUAL_MEM_MAP
 
-config HAVE_ARCH_EARLY_PFN_TO_NID
-	def_bool NUMA && SPARSEMEM
-
 config HAVE_ARCH_NODEDATA_EXTENSION
 	def_bool y
 	depends on NUMA
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,36 +58,6 @@ paddr_to_nid(unsigned long paddr)
 EXPORT_SYMBOL(paddr_to_nid);
 
-#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
-/*
- * Because of holes evaluate on section limits.
- * If the section of memory exists, then return the node where the section
- * resides.  Otherwise return node 0 as the default.  This is used by
- * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
- * the section resides.
- */
-int __meminit __early_pfn_to_nid(unsigned long pfn,
-					struct mminit_pfnnid_cache *state)
-{
-	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-
-	if (section >= state->last_start && section < state->last_end)
-		return state->last_nid;
-
-	for (i = 0; i < num_node_memblks; i++) {
-		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
-		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
-			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
-		if (section >= ssec && section < esec) {
-			state->last_start = ssec;
-			state->last_end = esec;
-			state->last_nid = node_memblk[i].nid;
-			return node_memblk[i].nid;
-		}
-	}
-
-	return -1;
-}
-#endif
-
 void numa_clear_node(int cpu)
 {
 	unmap_cpu_from_node(cpu, NUMA_NO_NODE);
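The node_memblk[] table walked by the deleted function is built from the same
EFI memory map that ia64 also feeds into memblock. Below is a simplified
sketch of that registration step, loosely modeled on register_active_ranges()
in arch/ia64/mm/init.c; the real function additionally clamps ranges around
the crash kernel under CONFIG_KEXEC, and the three-argument
memblock_add_node() matches the API of this era.

#include <linux/init.h>
#include <linux/memblock.h>

/*
 * Simplified sketch: invoked for every usable EFI memory range together
 * with its node id, it records the range/node pair in memblock. This is
 * the data the generic __early_pfn_to_nid() searches, which is why the
 * custom ia64 walker above became redundant.
 */
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}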
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2434,9 +2434,6 @@ static inline int early_pfn_to_nid(unsigned long pfn)
 #else
 /* please see mm/page_alloc.c */
 extern int __meminit early_pfn_to_nid(unsigned long pfn);
-/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn,
-					struct mminit_pfnnid_cache *state);
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1428,17 +1428,6 @@ void sparse_init(void);
 #define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
-/*
- * During memory init memblocks map pfns to nids. The search is expensive and
- * this caches recent lookups. The implementation of __early_pfn_to_nid
- * may treat start/end as pfns or sections.
- */
-struct mminit_pfnnid_cache {
-	unsigned long last_start;
-	unsigned long last_end;
-	int last_nid;
-};
-
 /*
  * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
  * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1559,14 +1559,23 @@ void __free_pages_core(struct page *page, unsigned int order)
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 
-static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
+/*
+ * During memory init memblocks map pfns to nids. The search is expensive and
+ * this caches recent lookups. The implementation of __early_pfn_to_nid
+ * treats start/end as pfns.
+ */
+struct mminit_pfnnid_cache {
+	unsigned long last_start;
+	unsigned long last_end;
+	int last_nid;
+};
 
-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
 
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn,
+static int __meminit __early_pfn_to_nid(unsigned long pfn,
 					struct mminit_pfnnid_cache *state)
 {
 	unsigned long start_pfn, end_pfn;
@@ -1584,7 +1593,6 @@ int __meminit __early_pfn_to_nid(unsigned long pfn,
 
 	return nid;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 
 int __meminit early_pfn_to_nid(unsigned long pfn)
 {
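For completeness, the sole consumer of the now static helper and its cache is
the early_pfn_to_nid() wrapper that begins at the end of the second hunk. A
sketch of how it looked in mm/page_alloc.c around this commit (surrounding
code, not part of this diff):

#include <linux/nodemask.h>
#include <linux/spinlock.h>

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	/* Serialize access to the shared one-entry cache. */
	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}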