f682a97a00
Patch series "Address issues slowing persistent memory initialization", v5.

The main thing this patch set achieves is that it allows us to initialize
each node's worth of persistent memory independently. As a result we reduce
page init time by about 2 minutes: instead of taking 30 to 40 seconds per
node and going through each node one at a time, we process all 4 nodes in
parallel in the case of a 12TB persistent memory setup spread evenly over
4 nodes.

This patch (of 3):

On systems with a large amount of memory it can take a significant amount
of time to initialize all of the page structs with the PAGE_POISON_PATTERN
value. I have seen it take over 2 minutes to initialize a system with over
12TB of RAM.

In order to work around the issue I had to disable CONFIG_DEBUG_VM, and
then the boot time returned to something much more reasonable, as the
arch_add_memory call completed in milliseconds versus seconds. However, in
doing that I had to disable all of the other VM debugging on the system.

To make it possible to run a kernel with CONFIG_DEBUG_VM enabled on a
system that has a large amount of memory, I have added a new kernel
parameter named "vm_debug" that can be set to "-" in order to disable it.

Link: http://lkml.kernel.org/r/20180925201921.3576.84239.stgit@localhost.localdomain
Reviewed-by: Pavel Tatashin <pavel.tatashin@microsoft.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
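For reference, based on the parser this patch adds (setup_vm_debug() below), the boot parameter would be used along these lines; the accepted option letters are whatever the switch statement handles, currently only 'P'/'p':

    vm_debug        enable every VM debug option the parameter controls
    vm_debug=-      disable them all, including page struct init poisoning
    vm_debug=P      enable page struct init poisoning only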
225 lines
5.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};
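
/*
 * Flag-name tables consumed by the %pGp (page flags), %pGg (gfp flags)
 * and %pGv (vma flags) printk format specifiers in lib/vsprintf.c.
 */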
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
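
/*
 * Dump the state of a struct page: refcount, mapcount, mapping, index
 * and flags, followed by a raw hex dump of the struct itself. Safe to
 * call on a poisoned (uninitialized) page; only the hex dump is printed
 * in that case.
 */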
void __dump_page(struct page *page, const char *reason)
{
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_emerg("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		  page, page_ref_count(page), mapcount,
		  page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);

hex_only:
	print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_alert("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

static bool page_init_poisoning __read_mostly = true;
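
/*
 * Parse the "vm_debug" boot parameter. "vm_debug" with no arguments
 * enables every option we control here, "vm_debug=-" disables them all,
 * and each option letter (currently only 'P', page struct init
 * poisoning) enables that option individually.
 */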
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
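
/*
 * Poison @size bytes of struct pages starting at @page with
 * PAGE_POISON_PATTERN so that any access before proper initialization
 * trips the PagePoisoned() checks. A no-op when poisoning has been
 * disabled with "vm_debug=-".
 */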
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
#endif /* CONFIG_DEBUG_VM */