Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-30 23:54:04 +08:00)
mm/hugetlb: move page order check inside hugetlb_cma_reserve()
All platforms could benefit from a page order check against MAX_PAGE_ORDER before allocating a CMA area for gigantic hugetlb pages. Let's move this check from individual platforms to generic hugetlb.

Link: https://lkml.kernel.org/r/20240209054221.1403364-1-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4acef5694e
commit ce70cfb145
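To make the commit message's reasoning concrete, here is a minimal, self-contained userspace sketch (not kernel code). The helper gigantic_needs_cma() and the PAGE_SHIFT/MAX_PAGE_ORDER values are illustrative assumptions, since both constants are configuration-dependent in the kernel; the point is why "order <= MAX_PAGE_ORDER" identifies page sizes the buddy allocator could already serve, making a CMA reservation unnecessary and the consolidated warning appropriate.

#include <assert.h>
#include <stdio.h>

/*
 * Assumed typical values for illustration only (both are config-dependent
 * in the kernel): 4 KiB base pages, largest buddy-allocator order of 10.
 */
#define PAGE_SHIFT      12
#define MAX_PAGE_ORDER  10

/*
 * Hypothetical helper: a huge page only needs a CMA reservation when its
 * order is too large for the buddy (page) allocator to satisfy.
 */
static int gigantic_needs_cma(unsigned int huge_page_shift)
{
        int order = (int)huge_page_shift - PAGE_SHIFT;

        return order > MAX_PAGE_ORDER;
}

int main(void)
{
        /*
         * A 2 MiB page (shift 21) fits the page allocator, so no CMA is
         * needed; passing such an order to hugetlb_cma_reserve() is what
         * the consolidated VM_WARN_ON() guards against.
         */
        assert(!gigantic_needs_cma(21));

        /*
         * 1 GiB (shift 30) and 16 GiB (shift 34) exceed MAX_PAGE_ORDER,
         * which is exactly the gigantic-page case CMA reservation serves.
         */
        assert(gigantic_needs_cma(30));
        assert(gigantic_needs_cma(34));

        printf("order(1G) = %d, MAX_PAGE_ORDER = %d\n",
               30 - PAGE_SHIFT, MAX_PAGE_ORDER);
        return 0;
}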
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -45,13 +45,6 @@ void __init arm64_hugetlb_cma_reserve(void)
 	else
 		order = CONT_PMD_SHIFT - PAGE_SHIFT;
 
-	/*
-	 * HugeTLB CMA reservation is required for gigantic
-	 * huge pages which could not be allocated via the
-	 * page allocator. Just warn if there is any change
-	 * breaking this assumption.
-	 */
-	WARN_ON(order <= MAX_PAGE_ORDER);
 	hugetlb_cma_reserve(order);
 }
 #endif /* CONFIG_CMA */
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
 	 */
 	order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-	if (order) {
-		VM_WARN_ON(order <= MAX_PAGE_ORDER);
+	if (order)
 		hugetlb_cma_reserve(order);
-	}
 }
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7720,6 +7720,13 @@ void __init hugetlb_cma_reserve(int order)
 	bool node_specific_cma_alloc = false;
 	int nid;
 
+	/*
+	 * HugeTLB CMA reservation is required for gigantic
+	 * huge pages which could not be allocated via the
+	 * page allocator. Just warn if there is any change
+	 * breaking this assumption.
+	 */
+	VM_WARN_ON(order <= MAX_PAGE_ORDER);
 	cma_reserve_called = true;
 
 	if (!hugetlb_cma_size)
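With the check centralized as in the last hunk, an architecture wiring up gigantic-page CMA only has to compute the order and delegate. The following is a hypothetical sketch of such a caller, modeled on the arm64 pattern above; somearch_hugetlb_cma_reserve() and SOMEARCH_GIGANTIC_SHIFT are made-up names, not code from this commit.

#ifdef CONFIG_CMA
/* Hypothetical arch hook, mirroring arm64_hugetlb_cma_reserve() above. */
void __init somearch_hugetlb_cma_reserve(void)
{
	/* Order of this architecture's gigantic huge page (made-up macro). */
	int order = SOMEARCH_GIGANTIC_SHIFT - PAGE_SHIFT;

	/*
	 * No local WARN_ON()/VM_WARN_ON() is needed any more: the generic
	 * hugetlb_cma_reserve() now checks order against MAX_PAGE_ORDER.
	 */
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */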