mm: convert hugetlb_page_mapping_lock_write to folio

The page is only used to get the mapping, so the folio will do just as
well.  Both callers already have a folio available, so this saves a call
to compound_head().

Link: https://lkml.kernel.org/r/20240412193510.2356957-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
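
For orientation, a minimal caller-side sketch of the conversion (illustrative
only, not part of the patch; "folio" and "mapping" stand in for the callers'
locals shown in the hunks below):

	struct address_space *mapping;

	/* Before: a caller holding a folio had to reach back to its page. */
	mapping = hugetlb_page_mapping_lock_write(&folio->page);

	/*
	 * After: the folio is passed directly, so folio_mapping() is used
	 * and the compound_head() call hidden in page_mapping() is skipped.
	 */
	mapping = hugetlb_folio_mapping_lock_write(folio);
	if (mapping)	/* NULL if there is no mapping or the trylock failed */
		i_mmap_unlock_write(mapping);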
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h

@@ -178,7 +178,7 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address);
 
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages[MAX_NUMNODES];
@@ -297,8 +297,8 @@ static inline unsigned long hugetlb_total_pages(void)
 	return 0;
 }
 
-static inline struct address_space *hugetlb_page_mapping_lock_write(
-					struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+					struct folio *folio)
 {
 	return NULL;
 }

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c

@@ -2155,13 +2155,13 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
  * stable. Due to locking order, we can only trylock_write. If we can
  * not get the lock, simply return NULL to caller.
  */
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(hpage);
+	struct address_space *mapping = folio_mapping(folio);
 
 	if (!mapping)
 		return mapping;

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c

@@ -1624,7 +1624,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
		 * TTU_RMAP_LOCKED to indicate we have taken the lock
		 * at this higher level.
		 */
-		mapping = hugetlb_page_mapping_lock_write(hpage);
+		mapping = hugetlb_folio_mapping_lock_write(folio);
 		if (mapping) {
 			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
 			i_mmap_unlock_write(mapping);

diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c

@@ -1425,7 +1425,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
-			mapping = hugetlb_page_mapping_lock_write(&src->page);
+			mapping = hugetlb_folio_mapping_lock_write(src);
 			if (unlikely(!mapping))
 				goto unlock_put_anon;
 