linux-next/mm/hugetlb_vmemmap.h
Muchun Song 47010c040d mm: hugetlb_vmemmap: cleanup CONFIG_HUGETLB_PAGE_FREE_VMEMMAP*
The word "free" is not expressive enough to describe the feature of
optimizing the vmemmap pages associated with each HugeTLB page, so rename
this keyword to "optimize".  This patch cleans up the configs to make the
code more expressive.
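
For illustration, the rename behind the CONFIG_HUGETLB_PAGE_FREE_VMEMMAP*
wildcard in the subject maps the old option names onto the new ones (the
DEFAULT_ON variant is inferred from the wildcard and does not appear in
this file):

  CONFIG_HUGETLB_PAGE_FREE_VMEMMAP            -> CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
  CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON -> CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON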

Link: https://lkml.kernel.org/r/20220404074652.68024-4-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2022-04-28 23:16:15 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Optimize vmemmap pages associated with HugeTLB
 *
 * Copyright (c) 2020, Bytedance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head);
void hugetlb_vmemmap_free(struct hstate *h, struct page *head);
void hugetlb_vmemmap_init(struct hstate *h);

/*
 * How many vmemmap pages associated with a HugeTLB page can be
 * optimized and freed to the buddy allocator.
 */
static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
{
	return h->optimize_vmemmap_pages;
}
#else
static inline int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
{
	return 0;
}

static inline void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
{
}

static inline void hugetlb_vmemmap_init(struct hstate *h)
{
}

static inline unsigned int hugetlb_optimize_vmemmap_pages(struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
#endif /* _LINUX_HUGETLB_VMEMMAP_H */
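
As a usage sketch only (not part of this commit; both caller names are
hypothetical, and only the hugetlb_vmemmap_* functions come from the header
above), a HugeTLB page's vmemmap pages are typically freed when the page is
prepared and must be reallocated before the page can be returned to the
buddy allocator:

#include <linux/hugetlb.h>
#include "hugetlb_vmemmap.h"

/* Hypothetical caller: prepare a freshly allocated HugeTLB page. */
static void sketch_prep_new_huge_page(struct hstate *h, struct page *head)
{
	/*
	 * Free the vmemmap pages backing the tail pages of this HugeTLB
	 * page; this is a no-op stub when
	 * CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is not set.
	 */
	hugetlb_vmemmap_free(h, head);
}

/* Hypothetical caller: give a HugeTLB page back to the buddy allocator. */
static int sketch_dissolve_huge_page(struct hstate *h, struct page *head)
{
	/*
	 * The vmemmap pages must be reallocated first; this can fail
	 * under memory pressure, so the caller must handle the error.
	 */
	return hugetlb_vmemmap_alloc(h, head);
}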