huge_memory: convert unmap_page() to unmap_folio()
Remove a folio->page->folio conversion.

Link: https://lkml.kernel.org/r/20220902194653.1739778-54-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 3e9a13daa6
commit 684555aacc
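For readers less familiar with the folio API, here is a rough user-space sketch of the calling-convention change. The struct page / struct folio definitions and the page_folio() helper below are toy stand-ins, not the real <linux/mm.h> types; they only illustrate the round-trip that the patch removes, where the caller converted folio -> page and unmap_page() immediately converted page -> folio again.

/* Illustrative sketch only: simplified stand-ins for the kernel types. */
#include <stdio.h>

struct page { int id; };
struct folio { struct page page; };	/* head page embedded, as a toy model */

/* toy stand-in for page_folio(): recover the folio from its head page */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page;
}

/* old shape: takes a page, immediately converts back to the folio */
static void unmap_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	printf("unmap folio %d (via page)\n", folio->page.id);
}

/* new shape: takes the folio directly, no round-trip */
static void unmap_folio(struct folio *folio)
{
	printf("unmap folio %d\n", folio->page.id);
}

int main(void)
{
	struct folio f = { .page = { .id = 1 } };

	unmap_page(&f.page);	/* old caller: folio -> page -> folio */
	unmap_folio(&f);	/* new caller: folio stays a folio */
	return 0;
}

The caller in split_huge_page_to_list() already holds the folio, so passing it straight through simply drops the redundant conversion, as the diff below shows.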
mm/huge_memory.c
@@ -2355,13 +2355,12 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	}
 }
 
-static void unmap_page(struct page *page)
+static void unmap_folio(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
 		TTU_SYNC;
 
-	VM_BUG_ON_PAGE(!PageHead(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 
 	/*
 	 * Anon pages need migration entries to preserve them, but file
@@ -2378,7 +2377,7 @@ static void remap_page(struct folio *folio, unsigned long nr)
 {
 	int i = 0;
 
-	/* If unmap_page() uses try_to_migrate() on file, remove this check */
+	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
 	if (!folio_test_anon(folio))
 		return;
 	for (;;) {
@@ -2428,7 +2427,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
 	 * for example lock_page() which set PG_waiters.
 	 *
 	 * Note that for mapped sub-pages of an anonymous THP,
-	 * PG_anon_exclusive has been cleared in unmap_page() and is stored in
+	 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
 	 * the migration entry instead from where remap_page() will restore it.
 	 * We can still have PG_anon_exclusive set on effectively unmapped and
 	 * unreferenced sub-pages of an anonymous THP: we can simply drop
@@ -2700,7 +2699,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	}
 
 	/*
-	 * Racy check if we can split the page, before unmap_page() will
+	 * Racy check if we can split the page, before unmap_folio() will
 	 * split PMDs
 	 */
 	if (!can_split_folio(folio, &extra_pins)) {
@@ -2708,7 +2707,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		goto out_unlock;
 	}
 
-	unmap_page(&folio->page);
+	unmap_folio(folio);
 
 	/* block interrupt reentry in xa_lock and spinlock */
 	local_irq_disable();