78f39084b4
We must add hugetlb_free_vmemmap=on (or "off") to the boot cmdline and
reboot the server to enable or disable the feature of optimizing vmemmap
pages associated with HugeTLB pages. However, rebooting usually takes a
long time. So add a sysctl to enable or disable the feature at runtime
without rebooting. Why do we need this? There are 3 use cases.
1) The feature of minimizing the overhead of struct page associated with
each HugeTLB page is disabled by default without passing
"hugetlb_free_vmemmap=on" to the boot cmdline. When we (ByteDance)
deliver servers to users who want to enable this feature, they have to
configure grub (change the boot cmdline) and reboot the servers, and
rebooting usually takes a long time (we have thousands of servers). It's
a very bad experience for the users. So we need an approach to enable
this feature at runtime without rebooting. This is a real use case from
our production environment.
2) In some use cases, HugeTLB pages are allocated 'on the fly' instead
of being pulled from the HugeTLB pool; those workloads are affected when
this feature is enabled. Such workloads can be identified by the fact
that they never explicitly allocate huge pages with 'nr_hugepages' but
only set 'nr_overcommit_hugepages' and then let the pages be allocated
from the buddy allocator at fault time. Commit 099730d674 confirms this
is a real use case. For those workloads, the page fault time could be
~2x slower than before. We suspect those users would want to disable
this feature if the system has enabled it and they don't think the
memory savings benefit is enough to make up for the performance drop.
3) A workload that wants vmemmap pages to be optimized and a workload
that sets 'nr_overcommit_hugepages' and does not want the extra overhead
at fault time when the overcommitted pages are allocated from the buddy
allocator may be deployed on the same server. The user can enable this
feature, set 'nr_hugepages' and 'nr_overcommit_hugepages', and then
disable the feature. In this case, the overcommitted HugeTLB pages will
not incur the extra overhead at fault time.
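
In practice, toggling the feature amounts to writing "1" or "0" to
/proc/sys/vm/hugetlb_optimize_vmemmap, the sysctl this patch registers
under "vm" (e.g. "echo 1 > /proc/sys/vm/hugetlb_optimize_vmemmap").
Below is a minimal user-space C sketch of the workflow from use case 3;
the helper name is purely illustrative, while the sysctl path and the
accepted values 0/1 come from the code in this patch:

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  /* Illustrative helper: write "0"/"1" to the sysctl (CAP_SYS_ADMIN). */
  static int set_optimize_vmemmap(int enable)
  {
          const char *val = enable ? "1" : "0";
          int fd = open("/proc/sys/vm/hugetlb_optimize_vmemmap", O_WRONLY);
          ssize_t ret;

          if (fd < 0)
                  return -1;
          ret = write(fd, val, strlen(val));
          close(fd);
          return ret == (ssize_t)strlen(val) ? 0 : -1;
  }

  int main(void)
  {
          if (set_optimize_vmemmap(1))   /* optimize pages allocated next */
                  perror("enable");
          /* ... grow 'nr_hugepages' while the feature is on ... */
          if (set_optimize_vmemmap(0))   /* overcommit faults stay fast */
                  perror("disable");
          return 0;
  }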
Link: https://lkml.kernel.org/r/20220512041142.39501-5-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Iurii Zaikin <yzaikin@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Masahiro Yamada <masahiroy@kernel.org>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
212 lines
6.2 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Optimize vmemmap pages associated with HugeTLB
 *
 * Copyright (c) 2020, Bytedance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/vm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt) "HugeTLB: " fmt

#include <linux/memory_hotplug.h>
#include "hugetlb_vmemmap.h"

/*
 * There are a lot of struct page structures associated with each HugeTLB
 * page. For tail pages, the value of compound_head is the same, so we can
 * reuse the first page of the head page structures. We map the virtual
 * addresses of all the pages of tail page structures to the head page
 * struct, and then free these page frames. Therefore, we need to reserve
 * one page as the vmemmap area.
 */
#define RESERVE_VMEMMAP_NR   1U
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
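
/*
 * For example, with 4 KiB base pages and a 64-byte struct page (typical on
 * x86_64), a 2 MiB HugeTLB page spans 512 base pages, whose struct pages
 * occupy 512 * 64 B = 32 KiB, i.e. 8 vmemmap pages: one page is kept as the
 * reserved area above and the remaining 7 can be remapped to it and freed.
 */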

enum vmemmap_optimize_mode {
        VMEMMAP_OPTIMIZE_OFF,
        VMEMMAP_OPTIMIZE_ON,
};

DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
                        hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static enum vmemmap_optimize_mode vmemmap_optimize_mode =
        IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);

static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
{
        if (vmemmap_optimize_mode == to)
                return;

        if (to == VMEMMAP_OPTIMIZE_OFF)
                static_branch_dec(&hugetlb_optimize_vmemmap_key);
        else
                static_branch_inc(&hugetlb_optimize_vmemmap_key);
        WRITE_ONCE(vmemmap_optimize_mode, to);
}
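
/*
 * The static key above is reference counted: vmemmap_optimize_mode_switch()
 * adds or drops one reference for the global mode, while
 * hugetlb_vmemmap_free() and hugetlb_vmemmap_alloc() below add or drop one
 * reference per optimized HugeTLB page, so the key stays enabled as long as
 * any optimized page exists, even after the mode is switched off at runtime.
 */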

static int __init hugetlb_vmemmap_early_param(char *buf)
{
        bool enable;
        enum vmemmap_optimize_mode mode;

        if (kstrtobool(buf, &enable))
                return -EINVAL;

        mode = enable ? VMEMMAP_OPTIMIZE_ON : VMEMMAP_OPTIMIZE_OFF;
        vmemmap_optimize_mode_switch(mode);

        return 0;
}
early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);

/*
 * Previously discarded vmemmap pages will be allocated and remapped
 * after this function returns zero.
 */
int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
{
        int ret;
        unsigned long vmemmap_addr = (unsigned long)head;
        unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;

        if (!HPageVmemmapOptimized(head))
                return 0;

        vmemmap_addr += RESERVE_VMEMMAP_SIZE;
        vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
        vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
        vmemmap_reuse = vmemmap_addr - PAGE_SIZE;

        /*
         * The pages which the vmemmap virtual address range [@vmemmap_addr,
         * @vmemmap_end) are mapped to are freed to the buddy allocator, and
         * the range is mapped to the page which @vmemmap_reuse is mapped to.
         * When a HugeTLB page is freed to the buddy allocator, previously
         * discarded vmemmap pages must be allocated and remapped.
         */
        ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
                                  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
        if (!ret) {
                ClearHPageVmemmapOptimized(head);
                static_branch_dec(&hugetlb_optimize_vmemmap_key);
        }

        return ret;
}

void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
{
        unsigned long vmemmap_addr = (unsigned long)head;
        unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;

        vmemmap_pages = hugetlb_optimize_vmemmap_pages(h);
        if (!vmemmap_pages)
                return;

        if (READ_ONCE(vmemmap_optimize_mode) == VMEMMAP_OPTIMIZE_OFF)
                return;

        static_branch_inc(&hugetlb_optimize_vmemmap_key);

        vmemmap_addr += RESERVE_VMEMMAP_SIZE;
        vmemmap_end = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
        vmemmap_reuse = vmemmap_addr - PAGE_SIZE;

        /*
         * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
         * to the page which @vmemmap_reuse is mapped to, then free the pages
         * which the range [@vmemmap_addr, @vmemmap_end) is mapped to.
         */
        if (vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
                static_branch_dec(&hugetlb_optimize_vmemmap_key);
        else
                SetHPageVmemmapOptimized(head);
}

void __init hugetlb_vmemmap_init(struct hstate *h)
{
        unsigned int nr_pages = pages_per_huge_page(h);
        unsigned int vmemmap_pages;

        /*
         * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
         * page structs that can be used when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
         * is enabled, so add a BUILD_BUG_ON to catch invalid usage of the
         * tail struct page.
         */
        BUILD_BUG_ON(__NR_USED_SUBPAGE >=
                     RESERVE_VMEMMAP_SIZE / sizeof(struct page));

        if (!is_power_of_2(sizeof(struct page))) {
                pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n");
                static_branch_disable(&hugetlb_optimize_vmemmap_key);
                return;
        }

        vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
        /*
         * The head page is not to be freed to the buddy allocator; the other
         * tail pages will map to the head page, so they can be freed.
         *
         * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
         * on some architectures (e.g. aarch64). See Documentation/arm64/
         * hugetlbpage.rst for more details.
         */
        if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
                h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;

        pr_info("can optimize %d vmemmap pages for %s\n",
                h->optimize_vmemmap_pages, h->name);
}

#ifdef CONFIG_PROC_SYSCTL
static int hugetlb_optimize_vmemmap_handler(struct ctl_table *table, int write,
                                            void *buffer, size_t *length,
                                            loff_t *ppos)
{
        int ret;
        enum vmemmap_optimize_mode mode;
        static DEFINE_MUTEX(sysctl_mutex);

        if (write && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        mutex_lock(&sysctl_mutex);
        mode = vmemmap_optimize_mode;
        table->data = &mode;
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (write && !ret)
                vmemmap_optimize_mode_switch(mode);
        mutex_unlock(&sysctl_mutex);

        return ret;
}
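
/*
 * The handler above copies the current mode to an on-stack variable and
 * points table->data at it, so a write that fails validation never becomes
 * globally visible, and sysctl_mutex serializes concurrent writers so the
 * static-key reference count is adjusted at most once per mode transition.
 */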

static struct ctl_table hugetlb_vmemmap_sysctls[] = {
        {
                .procname       = "hugetlb_optimize_vmemmap",
                .maxlen         = sizeof(enum vmemmap_optimize_mode),
                .mode           = 0644,
                .proc_handler   = hugetlb_optimize_vmemmap_handler,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        { }
};
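
/*
 * extra1/extra2 above bound the accepted values to 0 and 1; writes of any
 * other value are rejected by proc_dointvec_minmax() with -EINVAL.
 */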

static __init int hugetlb_vmemmap_sysctls_init(void)
{
        /*
         * If "memory_hotplug.memmap_on_memory" is enabled or "struct page"
         * crosses page boundaries, the vmemmap pages cannot be optimized.
         */
        if (!mhp_memmap_on_memory() && is_power_of_2(sizeof(struct page)))
                register_sysctl_init("vm", hugetlb_vmemmap_sysctls);

        return 0;
}
late_initcall(hugetlb_vmemmap_sysctls_init);
#endif /* CONFIG_PROC_SYSCTL */