mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-23 20:24:12 +08:00
mm: shmem: avoid allocating huge pages larger than MAX_PAGECACHE_ORDER for shmem
Similar to commit d659b715e9
("mm/huge_memory: avoid PMD-size page cache if needed"), ARM64 can support 512MB PMD-sized THP when the base page size is 64KB, which is larger than the maximum supported page cache size MAX_PAGECACHE_ORDER. This is not expected. To fix this issue, use THP_ORDERS_ALL_FILE_DEFAULT for shmem to filter allowable huge orders. [baolin.wang@linux.alibaba.com: remove comment, per Barry] Link: https://lkml.kernel.org/r/c55d7ef7-78aa-4ed6-b897-c3e03a3f3ab7@linux.alibaba.com [wangkefeng.wang@huawei.com: remove local `orders'] Link: https://lkml.kernel.org/r/87769ae8-b6c6-4454-925d-1864364af9c8@huawei.com Link: https://lkml.kernel.org/r/117121665254442c3c7f585248296495e5e2b45c.1722404078.git.baolin.wang@linux.alibaba.com Fixes: e7a2ab7b3b
("mm: shmem: add mTHP support for anonymous shmem") Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Reviewed-by: Barry Song <baohua@kernel.org> Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand <david@redhat.com> Cc: Gavin Shan <gshan@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Lance Yang <ioworker0@gmail.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Zi Yan <ziy@nvidia.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
5161b48712
commit
b66b1b71d7
@@ -1629,11 +1629,6 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
 	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
 	unsigned long vm_flags = vma->vm_flags;
-	/*
-	 * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that
-	 * are enabled for this vma.
-	 */
-	unsigned long orders = BIT(PMD_ORDER + 1) - 1;
 	loff_t i_size;
 	int order;
 
@@ -1678,7 +1673,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	if (global_huge)
 		mask |= READ_ONCE(huge_shmem_orders_inherit);
 
-	return orders & mask;
+	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
 }
 
 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
|
Loading…
Reference in New Issue
Block a user