mm: remove PageSwapCache
This flag is now only used on folios, so we can remove all the page
accessors and reword the comments that refer to them.

Link: https://lkml.kernel.org/r/20240821193445.2294269-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 32f51ead3d (parent 6f394ee9dd)
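
As context for callers being converted by this series: code that used the
removed page accessor now goes through the folio. A minimal sketch of the
pattern (the helper below is hypothetical, not part of this patch):

#include <linux/page-flags.h>

/* Hypothetical caller illustrating the conversion. */
static bool my_page_is_swapcache(struct page *page)
{
	/* Before this series: return PageSwapCache(page); */
	return folio_test_swapcache(page_folio(page));
}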

include/linux/mm_types.h
@@ -109,7 +109,7 @@ struct page {
 				/**
 				 * @private: Mapping-private opaque data.
 				 * Usually used for buffer_heads if PagePrivate.
-				 * Used for swp_entry_t if PageSwapCache.
+				 * Used for swp_entry_t if swapcache flag set.
 				 * Indicates order in the buddy system if PageBuddy.
 				 */
 				unsigned long private;

include/linux/page-flags.h
@@ -574,15 +574,10 @@ static __always_inline bool folio_test_swapcache(const struct folio *folio)
 			test_bit(PG_swapcache, const_folio_flags(folio, 0));
 }
 
-static __always_inline bool PageSwapCache(const struct page *page)
-{
-	return folio_test_swapcache(page_folio(page));
-}
-
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
+FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
 #else
-PAGEFLAG_FALSE(SwapCache, swapcache)
+FOLIO_FLAG_FALSE(swapcache)
 #endif
 
 PAGEFLAG(Unevictable, unevictable, PF_HEAD)
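
The FOLIO_SET_FLAG()/FOLIO_CLEAR_FLAG() lines above generate
folio_set_swapcache() and folio_clear_swapcache(). Roughly paraphrasing the
macro machinery in include/linux/page-flags.h (a sketch of the expansion,
not the literal preprocessor output):

/* Approximate expansion of FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE). */
static __always_inline void folio_set_swapcache(struct folio *folio)
{
	set_bit(PG_swapcache, folio_flags(folio, FOLIO_HEAD_PAGE));
}

/* Approximate expansion of FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE). */
static __always_inline void folio_clear_swapcache(struct folio *folio)
{
	clear_bit(PG_swapcache, folio_flags(folio, FOLIO_HEAD_PAGE));
}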

mm/ksm.c
@@ -909,12 +909,13 @@ again:
 	 */
 	while (!folio_try_get(folio)) {
 		/*
-		 * Another check for page->mapping != expected_mapping would
-		 * work here too. We have chosen the !PageSwapCache test to
-		 * optimize the common case, when the page is or is about to
-		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
-		 * in the ref_freeze section of __remove_mapping(); but Anon
-		 * folio->mapping reset to NULL later, in free_pages_prepare().
+		 * Another check for folio->mapping != expected_mapping
+		 * would work here too. We have chosen to test the
+		 * swapcache flag to optimize the common case, when the
+		 * folio is or is about to be freed: the swapcache flag
+		 * is cleared (under spin_lock_irq) in the ref_freeze
+		 * section of __remove_mapping(); but anon folio->mapping
+		 * is reset to NULL later, in free_pages_prepare().
 		 */
 		if (!folio_test_swapcache(folio))
 			goto stale;
@@ -945,7 +946,7 @@ again:
 
 stale:
 	/*
-	 * We come here from above when page->mapping or !PageSwapCache
+	 * We come here from above when folio->mapping or the swapcache flag
 	 * suggests that the node is stale; but it might be under migration.
 	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
 	 * before checking whether node->kpfn has been changed.
@@ -1452,7 +1453,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 		goto out;
 
 	/*
-	 * We need the page lock to read a stable PageSwapCache in
+	 * We need the folio lock to read a stable swapcache flag in
 	 * write_protect_page(). We use trylock_page() instead of
 	 * lock_page() because we don't want to wait here - we
 	 * prefer to continue scanning and merging different pages,
@@ -3123,7 +3124,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
 		 * newfolio->mapping was set in advance; now we need smp_wmb()
 		 * to make sure that the new stable_node->kpfn is visible
 		 * to ksm_get_folio() before it can see that folio->mapping
-		 * has gone stale (or that folio_test_swapcache has been cleared).
+		 * has gone stale (or that the swapcache flag has been cleared).
 		 */
 		smp_wmb();
 		folio_set_stable_node(folio, NULL);
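
The smp_wmb() above pairs with the smp_rmb() in ksm_get_folio()'s stale
path. A self-contained schematic of that pairing (writer()/reader(),
node_kpfn and node_mapping are invented names for illustration; in the
kernel the writer is folio_migrate_ksm() and the reader is ksm_get_folio()):

#include <linux/compiler.h>
#include <asm/barrier.h>

static unsigned long node_kpfn;
static void *node_mapping;

static void writer(unsigned long new_kpfn)
{
	WRITE_ONCE(node_kpfn, new_kpfn);	/* new stable_node->kpfn */
	smp_wmb();				/* publish kpfn before the stale hint */
	WRITE_ONCE(node_mapping, NULL);		/* folio->mapping now looks stale */
}

static unsigned long reader(void)
{
	if (!READ_ONCE(node_mapping)) {		/* saw the stale hint... */
		smp_rmb();			/* ...pairs with smp_wmb() in writer() */
		return READ_ONCE(node_kpfn);	/* so this sees the new kpfn */
	}
	return 0;
}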

mm/migrate.c
@@ -639,7 +639,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 	folio_migrate_ksm(newfolio, folio);
 	/*
 	 * Please do not reorder this without considering how mm/ksm.c's
-	 * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
+	 * ksm_get_folio() depends upon ksm_migrate_page() and the
+	 * swapcache flag.
 	 */
 	if (folio_test_swapcache(folio))
 		folio_clear_swapcache(folio);

mm/shmem.c
@@ -502,8 +502,8 @@ static int shmem_replace_entry(struct address_space *mapping,
  * Sometimes, before we decide whether to proceed or to fail, we must check
  * that an entry was not already brought back from swap by a racing thread.
  *
- * Checking page is not enough: by the time a SwapCache page is locked, it
- * might be reused, and again be SwapCache, using the same swap as before.
+ * Checking folio is not enough: by the time a swapcache folio is locked, it
+ * might be reused, and again be swapcache, using the same swap as before.
  */
 static bool shmem_confirm_swap(struct address_space *mapping,
 			       pgoff_t index, swp_entry_t swap)
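
For context, the body of shmem_confirm_swap() (untouched by this patch)
simply rechecks the XArray slot for the expected swap entry; at the time of
this series it looks roughly like:

static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}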
@@ -1965,9 +1965,10 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 
 	if (unlikely(error)) {
 		/*
-		 * Is this possible? I think not, now that our callers check
-		 * both PageSwapCache and page_private after getting page lock;
-		 * but be defensive. Reverse old to newpage for clear and free.
+		 * Is this possible? I think not, now that our callers
+		 * check both the swapcache flag and folio->private
+		 * after getting the folio lock; but be defensive.
+		 * Reverse old to newpage for clear and free.
 		 */
 		old = new;
 	} else {