commit dd0ff4d12d upstream.

The vmemmap pages are marked by kmemleak when allocated from memblock.
Remove them from kmemleak when freeing the page. Otherwise, when we
reuse the page, kmemleak may report such an error and then stop working.

 kmemleak: Cannot insert 0xffff98fb6eab3d40 into the object search tree (overlaps existing)
 kmemleak: Kernel memory leak detector disabled
 kmemleak: Object 0xffff98fb6be00000 (size 335544320):
 kmemleak:   comm "swapper", pid 0, jiffies 4294892296
 kmemleak:   min_count = 0
 kmemleak:   count = 0
 kmemleak:   flags = 0x1
 kmemleak:   checksum = 0
 kmemleak:   backtrace:

Link: https://lkml.kernel.org/r/20220819094005.2928241-1-liushixin2@huawei.com
Fixes: f41f2ed43c ("mm: hugetlb: free the vmemmap pages associated with each HugeTLB page")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
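In short, the change amounts to two added lines in mm/bootmem_info.c. The
patch-style excerpt below is a sketch reconstructed from the file as it
appears after this commit, not the original diff:

 #include <linux/memory_hotplug.h>
+#include <linux/kmemleak.h>
 ...
 		INIT_LIST_HEAD(&page->lru);
+		kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
 		free_reserved_page(page);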
130 lines
3.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Bootmem core functions.
 *
 * Copyright (c) 2020, Bytedance.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 */
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>
#include <linux/kmemleak.h>
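
/*
 * Mark @page as holding bootmem-allocated data: the bootmem type is
 * stashed in page->freelist, @info (a section number or node id) in
 * page->private, and a reference is taken so the metadata stays valid
 * until put_page_bootmem() drops it.
 */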
void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
        page->freelist = (void *)type;
        SetPagePrivate(page);
        set_page_private(page, info);
        page_ref_inc(page);
}
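
/*
 * Drop a bootmem reference taken by get_page_bootmem().  Once the
 * refcount falls back to one, the bootmem metadata is cleared and the
 * page is released to the page allocator.
 */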
void put_page_bootmem(struct page *page)
{
        unsigned long type;

        type = (unsigned long) page->freelist;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (page_ref_dec_return(page) == 1) {
                page->freelist = NULL;
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
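                /*
                 * The page was registered with kmemleak when it was
                 * allocated from memblock.  Remove it from kmemleak
                 * before reuse, otherwise kmemleak fails to insert the
                 * new object ("overlaps existing") and disables itself.
                 */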
                kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
                free_reserved_page(page);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;
        struct mem_section_usage *usage;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usage = ms->usage;
        page = virt_to_page(usage);

        mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;
        struct mem_section_usage *usage;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

        usage = ms->usage;
        page = virt_to_page(usage);

        mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
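
/*
 * Register bootmem info for a whole node: the pages backing the pgdat
 * structure itself and, for every valid section on the node, the pages
 * backing that section's memmap and usage structures.
 */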
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        pfn = pgdat->node_start_pfn;
        end_pfn = pgdat_end_pfn(pgdat);

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes - on
                 * node0 as well as nodeN.  To avoid registering a pfn against
                 * multiple nodes we check that this pfn does not already
                 * reside in some other nodes.
                 */
                if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}