mm: convert huge_zero_page to huge_zero_folio
With all callers of is_huge_zero_page() converted, we can now switch the
huge_zero_page itself from being a compound page to a folio.

Link: https://lkml.kernel.org/r/20240326202833.523759-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 5691753d73
parent b002a7b0a5
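For readers skimming the diff, the shape of the change is: the global becomes a struct folio *, folio-aware helpers compare folios directly, and a small static inline shim keeps the old page-returning interface alive for remaining page-based callers. The snippet below is a standalone userspace sketch of that pattern, not the kernel code: the struct definitions are simplified stand-ins, the mm argument is dropped, and the cast in main() stands in for the kernel's page_folio().

```c
/* Standalone model of the page -> folio conversion pattern in this commit.
 * All types here are simplified stand-ins, not the kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

struct page  { unsigned long flags; };
struct folio { struct page page; };	/* a folio embeds its head page */

static struct folio zero_folio_storage;
static struct folio *huge_zero_folio = &zero_folio_storage;	/* was: struct page *huge_zero_page */

/* Folio-aware check: compare folio pointers directly (the new form). */
static bool is_huge_zero_folio(const struct folio *folio)
{
	return huge_zero_folio == folio;
}

/* Compatibility shim, mirroring the new static inline in the diff:
 * page-based callers still get a struct page *, taken from the folio. */
static struct page *mm_get_huge_zero_page(void)
{
	return &huge_zero_folio->page;
}

int main(void)
{
	struct page *p = mm_get_huge_zero_page();

	/* In the kernel, page_folio() maps a page back to its folio; here the
	 * folio's first member is its head page, so a cast is enough. */
	printf("is huge zero folio: %d\n", is_huge_zero_folio((struct folio *)p));
	return 0;
}
```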
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -348,17 +348,12 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
 
-extern struct page *huge_zero_page;
+extern struct folio *huge_zero_folio;
 extern unsigned long huge_zero_pfn;
 
-static inline bool is_huge_zero_page(const struct page *page)
-{
-	return READ_ONCE(huge_zero_page) == page;
-}
-
 static inline bool is_huge_zero_folio(const struct folio *folio)
 {
-	return READ_ONCE(huge_zero_page) == &folio->page;
+	return READ_ONCE(huge_zero_folio) == folio;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
@@ -371,9 +366,14 @@ static inline bool is_huge_zero_pud(pud_t pud)
 	return false;
 }
 
-struct page *mm_get_huge_zero_page(struct mm_struct *mm);
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
 void mm_put_huge_zero_page(struct mm_struct *mm);
 
+static inline struct page *mm_get_huge_zero_page(struct mm_struct *mm)
+{
+	return &mm_get_huge_zero_folio(mm)->page;
+}
+
 #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
 
 static inline bool thp_migration_supported(void)
@@ -485,11 +485,6 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	return 0;
 }
 
-static inline bool is_huge_zero_page(const struct page *page)
-{
-	return false;
-}
-
 static inline bool is_huge_zero_folio(const struct folio *folio)
 {
 	return false;
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -74,7 +74,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 					 struct shrink_control *sc);
 
 static atomic_t huge_zero_refcount;
-struct page *huge_zero_page __read_mostly;
+struct folio *huge_zero_folio __read_mostly;
 unsigned long huge_zero_pfn __read_mostly = ~0UL;
 unsigned long huge_anon_orders_always __read_mostly;
 unsigned long huge_anon_orders_madvise __read_mostly;
@@ -192,24 +192,24 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 
 static bool get_huge_zero_page(void)
 {
-	struct page *zero_page;
+	struct folio *zero_folio;
 retry:
 	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
 		return true;
 
-	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 			HPAGE_PMD_ORDER);
-	if (!zero_page) {
+	if (!zero_folio) {
 		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
 		return false;
 	}
 	preempt_disable();
-	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
 		preempt_enable();
-		__free_pages(zero_page, compound_order(zero_page));
+		folio_put(zero_folio);
 		goto retry;
 	}
-	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
+	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
 
 	/* We take additional reference here. It will be put back by shrinker */
 	atomic_set(&huge_zero_refcount, 2);
@@ -227,10 +227,10 @@ static void put_huge_zero_page(void)
 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 }
 
-struct page *mm_get_huge_zero_page(struct mm_struct *mm)
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
 {
 	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
-		return READ_ONCE(huge_zero_page);
+		return READ_ONCE(huge_zero_folio);
 
 	if (!get_huge_zero_page())
 		return NULL;
@@ -238,7 +238,7 @@ struct page *mm_get_huge_zero_page(struct mm_struct *mm)
 	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
 		put_huge_zero_page();
 
-	return READ_ONCE(huge_zero_page);
+	return READ_ONCE(huge_zero_folio);
 }
 
 void mm_put_huge_zero_page(struct mm_struct *mm)
@@ -258,10 +258,10 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 					struct shrink_control *sc)
 {
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
-		struct page *zero_page = xchg(&huge_zero_page, NULL);
-		BUG_ON(zero_page == NULL);
+		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
+		BUG_ON(zero_folio == NULL);
 		WRITE_ONCE(huge_zero_pfn, ~0UL);
-		__free_pages(zero_page, compound_order(zero_page));
+		folio_put(zero_folio);
 		return HPAGE_PMD_NR;
 	}
 
@@ -1340,7 +1340,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * since we already have a zero page to copy. It just takes a
 		 * reference.
 		 */
-		mm_get_huge_zero_page(dst_mm);
+		mm_get_huge_zero_folio(dst_mm);
 		goto out_zero_page;
 	}
 
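Not changed by this patch, but useful for reading the get_huge_zero_page() and shrink_huge_zero_page_scan() hunks above: the zero folio is allocated lazily, published with cmpxchg() (a racing allocator frees its copy and retries), and the refcount is primed to 2 so the shrinker holds one cached reference it can drop later. The snippet below is a rough, single-threaded userspace model of that lifecycle using C11 atomics in place of the kernel primitives; every name and type in it is a simplified stand-in, not a kernel API.

```c
/* Userspace model of the huge zero folio lifecycle: lazy allocation,
 * cmpxchg publication with retry on a lost race, and a refcount primed
 * to 2 so a shrinker-style callback can drop the cached reference. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct folio { char data[4096]; };	/* stand-in for a PMD-sized folio */

static _Atomic(struct folio *) huge_zero_folio;
static atomic_int huge_zero_refcount;

static bool get_huge_zero_folio_ref(void)
{
	struct folio *zero_folio, *expected;

retry:
	/* Fast path: take a reference if one already exists (inc-not-zero). */
	for (int old = atomic_load(&huge_zero_refcount); old != 0; ) {
		if (atomic_compare_exchange_weak(&huge_zero_refcount, &old, old + 1))
			return true;
	}

	zero_folio = calloc(1, sizeof(*zero_folio));
	if (!zero_folio)
		return false;

	/* Publish the folio; if someone beat us to it, free ours and retry. */
	expected = NULL;
	if (!atomic_compare_exchange_strong(&huge_zero_folio, &expected, zero_folio)) {
		free(zero_folio);
		goto retry;
	}

	/* One reference for the caller, one kept for the shrinker to drop. */
	atomic_store(&huge_zero_refcount, 2);
	return true;
}

static void put_huge_zero_folio_ref(void)
{
	atomic_fetch_sub(&huge_zero_refcount, 1);
}

/* Shrinker-style reclaim: frees only when the cached reference is the last one. */
static long shrink_huge_zero_folio(void)
{
	int expected = 1;

	if (atomic_compare_exchange_strong(&huge_zero_refcount, &expected, 0)) {
		struct folio *zero_folio = atomic_exchange(&huge_zero_folio, NULL);
		free(zero_folio);
		return 1;
	}
	return 0;
}

int main(void)
{
	get_huge_zero_folio_ref();	/* allocates; refcount is now 2 */
	printf("freed early? %ld\n", shrink_huge_zero_folio());	/* 0: caller still holds a ref */
	put_huge_zero_folio_ref();	/* refcount drops to 1 */
	printf("freed now?   %ld\n", shrink_huge_zero_folio());	/* 1: cached ref released */
	return 0;
}
```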