mm/swap: convert delete_from_swap_cache() to take a folio

All but one caller already has a folio, so convert it to use a folio.

Link: https://lkml.kernel.org/r/20220617175020.717127-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 75fa68a5d8
parent b98c359f1d
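For callers, the conversion changes only the argument type: code that already has a folio passes it directly, and the one remaining page-based caller wraps its page with page_folio() first. A minimal sketch of the two call patterns; delete_from_swap_cache(), page_folio() and folio_unlock() are the real APIs touched by this commit, while the surrounding function is hypothetical, for illustration only, and not a compilable kernel unit:

	/* Illustrative sketch only; example_drop() is a made-up caller. */
	static void example_drop(struct page *page)
	{
		/* Before this commit, the page was passed directly:
		 *	delete_from_swap_cache(page);
		 * After it, convert to the owning folio once, then stay
		 * in the folio API.
		 */
		struct folio *folio = page_folio(page);

		/* Folio must be locked and verified to be in the swap cache. */
		delete_from_swap_cache(folio);
		folio_unlock(folio);
	}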
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1007,12 +1007,13 @@ static int me_swapcache_dirty(struct page_state *ps, struct page *p)
 static int me_swapcache_clean(struct page_state *ps, struct page *p)
 {
+	struct folio *folio = page_folio(p);
 	int ret;
 
-	delete_from_swap_cache(p);
+	delete_from_swap_cache(folio);
 
 	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
-	unlock_page(p);
+	folio_unlock(folio);
 
 	if (has_extra_refcount(ps, p, false))
 		ret = MF_FAILED;
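me_swapcache_clean() is the one caller mentioned in the commit message that does not already have a folio: it still receives a bare struct page, so it converts up front with page_folio(p) and then uses the folio APIs throughout.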
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1691,7 +1691,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
 		return;
 
 	folio_wait_writeback(folio);
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	spin_lock_irq(&info->lock);
 	/*
 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
@@ -1789,7 +1789,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	if (sgp == SGP_WRITE)
 		folio_mark_accessed(folio);
 
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	folio_mark_dirty(folio);
 	swap_free(swap);
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -38,7 +38,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
 		      gfp_t gfp, void **shadowp);
 void __delete_from_swap_cache(struct page *page,
 			      swp_entry_t entry, void *shadow);
-void delete_from_swap_cache(struct page *page);
+void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
 void free_swap_cache(struct page *page);
@@ -140,7 +140,7 @@ static inline void __delete_from_swap_cache(struct page *page,
 {
 }
 
-static inline void delete_from_swap_cache(struct page *page)
+static inline void delete_from_swap_cache(struct folio *folio)
 {
 }
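The !CONFIG_SWAP stub changes signature in lockstep with the declaration above, so call sites compile unchanged whether or not swap support is built in.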
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -222,22 +222,22 @@ fail:
 }
 
 /*
- * This must be called only on pages that have
+ * This must be called only on folios that have
  * been verified to be in the swap cache and locked.
- * It will never put the page into the free list,
- * the caller has a reference on the page.
+ * It will never put the folio into the free list,
+ * the caller has a reference on the folio.
 */
-void delete_from_swap_cache(struct page *page)
+void delete_from_swap_cache(struct folio *folio)
 {
-	swp_entry_t entry = { .val = page_private(page) };
+	swp_entry_t entry = folio_swap_entry(folio);
 	struct address_space *address_space = swap_address_space(entry);
 
 	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(page, entry, NULL);
+	__delete_from_swap_cache(&folio->page, entry, NULL);
 	xa_unlock_irq(&address_space->i_pages);
 
-	put_swap_page(page, entry);
-	page_ref_sub(page, thp_nr_pages(page));
+	put_swap_page(&folio->page, entry);
+	folio_ref_sub(folio, folio_nr_pages(folio));
 }
 
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
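The implementation now derives the swap entry with folio_swap_entry() rather than open-coding { .val = page_private(page) }. The two are equivalent: at the time of this commit the helper is defined roughly as the following sketch.

	static inline swp_entry_t folio_swap_entry(struct folio *folio)
	{
		/* The swap entry is stored in the (head) page's private field. */
		swp_entry_t entry = { .val = page_private(&folio->page) };
		return entry;
	}

Likewise, folio_ref_sub(folio, folio_nr_pages(folio)) drops the same number of references that page_ref_sub(page, thp_nr_pages(page)) did, since for a head page thp_nr_pages() and folio_nr_pages() agree.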
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1617,7 +1617,7 @@ int try_to_free_swap(struct page *page)
 	if (pm_suspended_storage())
 		return 0;
 
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	folio_set_dirty(folio);
 	return 1;
 }