commit f52407ce2d

To initialize a hot-added node, some pages must be allocated. At that
time the node has no memory yet, which makes the allocation always fail.
In such a case, allocate the pages from other nodes.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Yakui Zhao <yakui.zhao@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
166 lines
4.3 KiB
C
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
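/*
 * For reference, this is what the base-offset calculation looks like in
 * the generic memory model (include/asm-generic/memory_model.h) when
 * CONFIG_SPARSEMEM_VMEMMAP is enabled -- the memmap is virtually
 * contiguous, so both conversions reduce to pointer arithmetic (a
 * reference sketch; the exact form may vary by tree):
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */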
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
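/*
 * __init_refok: this helper is only called while the early (bootmem)
 * allocator is still usable, so the reference from __meminit code into
 * __init code is intentional and the section-mismatch warning is
 * suppressed.
 */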
static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
}


void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page;

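		/*
		 * A node that is being hot-added has no memory of its
		 * own yet (see the commit message above), so a node-local
		 * allocation would always fail; only target the node when
		 * it actually has memory.
		 */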
		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		else
			page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

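/*
 * Each vmemmap_*_populate() helper below ensures that one level of the
 * kernel page table covers @addr, allocating a backing block with
 * vmemmap_alloc_block() on first use, and returns the entry so the
 * caller can descend to the next level.
 */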
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

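/*
 * Map the memmap range with base pages: for each page-sized step, walk
 * the pgd/pud/pmd/pte levels top down, allocating any level that is
 * still missing.
 */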
int __meminit vmemmap_populate_basepages(struct page *start_page,
					unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
	if (error)
		return NULL;

	return map;
}
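/*
 * A minimal sketch of the architecture hook this file expects. An arch
 * whose vmemmap is backed purely by base pages can forward directly to
 * vmemmap_populate_basepages() (ia64 does essentially this); the sketch
 * below is illustrative, not code from this tree:
 *
 *	int __meminit vmemmap_populate(struct page *start_page,
 *					unsigned long size, int node)
 *	{
 *		return vmemmap_populate_basepages(start_page, size, node);
 *	}
 */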