filemap: Add filemap_unaccount_folio()
Replace unaccount_page_cache_page() with filemap_unaccount_folio(). The bug handling path could be a bit more robust (eg taking into account the mapcounts of tail pages), but it's really never supposed to happen.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
commit 621db4880d
parent a548b61583
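The commit message above notes that the bad-page handling path could be made more robust by taking the mapcounts of tail pages into account, but deliberately does not do so. Purely as an illustration of that alternative (not part of this commit; the helper name is made up, and total_mapcount() and folio_page() are assumed to be available in this tree alongside the folio helpers used in the hunks below), such a path might look roughly like:

/*
 * Illustrative sketch only -- not from this commit.  Rather than trusting
 * the head page's mapcount, sum every subpage's mapcount, reset each one,
 * and drop the matching references.
 */
static void filemap_fixup_mapped_folio(struct address_space *mapping,
					struct folio *folio)
{
	int mapcount = total_mapcount(&folio->page);
	long i, nr = folio_nr_pages(folio);

	if (!mapping_exiting(mapping) ||
	    folio_ref_count(folio) < mapcount + 2)
		return;

	for (i = 0; i < nr; i++)
		page_mapcount_reset(folio_page(folio, i));
	folio_ref_sub(folio, mapcount);
}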
include/linux/pagemap.h (5 lines changed)

@@ -884,11 +884,6 @@ static inline void __set_page_dirty(struct page *page,
 }
 void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
 			  struct bdi_writeback *wb);
-static inline void account_page_cleaned(struct page *page,
-		struct address_space *mapping, struct bdi_writeback *wb)
-{
-	return folio_account_cleaned(page_folio(page), mapping, wb);
-}
 void __folio_cancel_dirty(struct folio *folio);
 static inline void folio_cancel_dirty(struct folio *folio)
 {
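With the account_page_cleaned() compatibility wrapper removed above, a page-based caller would perform the page-to-folio conversion itself before calling the folio API, which is exactly what the wrapper used to do on its behalf. A minimal sketch of that pattern (hypothetical caller, not from this commit):

/* Hypothetical caller: convert the page and call the folio API directly. */
static void example_account_cleaned(struct page *page,
				    struct address_space *mapping,
				    struct bdi_writeback *wb)
{
	folio_account_cleaned(page_folio(page), mapping, wb);
}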
mm/filemap.c (70 lines changed)

@@ -145,74 +145,74 @@ static void page_cache_delete(struct address_space *mapping,
 	mapping->nrpages -= nr;
 }
 
-static void unaccount_page_cache_page(struct address_space *mapping,
-				      struct page *page)
+static void filemap_unaccount_folio(struct address_space *mapping,
+		struct folio *folio)
 {
-	int nr;
+	long nr;
 
 	/*
 	 * if we're uptodate, flush out into the cleancache, otherwise
 	 * invalidate any existing cleancache entries. We can't leave
 	 * stale data around in the cleancache once our page is gone
 	 */
-	if (PageUptodate(page) && PageMappedToDisk(page))
-		cleancache_put_page(page);
+	if (folio_test_uptodate(folio) && folio_test_mappedtodisk(folio))
+		cleancache_put_page(&folio->page);
 	else
-		cleancache_invalidate_page(mapping, page);
+		cleancache_invalidate_page(mapping, &folio->page);
 
-	VM_BUG_ON_PAGE(PageTail(page), page);
-	VM_BUG_ON_PAGE(page_mapped(page), page);
-	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
+	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
+	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
 		int mapcount;
 
 		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
-			 current->comm, page_to_pfn(page));
-		dump_page(page, "still mapped when deleted");
+			 current->comm, folio_pfn(folio));
+		dump_page(&folio->page, "still mapped when deleted");
 		dump_stack();
 		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 
-		mapcount = page_mapcount(page);
+		mapcount = page_mapcount(&folio->page);
 		if (mapping_exiting(mapping) &&
-		    page_count(page) >= mapcount + 2) {
+		    folio_ref_count(folio) >= mapcount + 2) {
 			/*
 			 * All vmas have already been torn down, so it's
-			 * a good bet that actually the page is unmapped,
+			 * a good bet that actually the folio is unmapped,
 			 * and we'd prefer not to leak it: if we're wrong,
 			 * some other bad page check should catch it later.
 			 */
-			page_mapcount_reset(page);
-			page_ref_sub(page, mapcount);
+			page_mapcount_reset(&folio->page);
+			folio_ref_sub(folio, mapcount);
 		}
 	}
 
-	/* hugetlb pages do not participate in page cache accounting. */
-	if (PageHuge(page))
+	/* hugetlb folios do not participate in page cache accounting. */
+	if (folio_test_hugetlb(folio))
 		return;
 
-	nr = thp_nr_pages(page);
+	nr = folio_nr_pages(folio);
 
-	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
-	if (PageSwapBacked(page)) {
-		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
-		if (PageTransHuge(page))
-			__mod_lruvec_page_state(page, NR_SHMEM_THPS, -nr);
-	} else if (PageTransHuge(page)) {
-		__mod_lruvec_page_state(page, NR_FILE_THPS, -nr);
+	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+	if (folio_test_swapbacked(folio)) {
+		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+		if (folio_test_pmd_mappable(folio))
+			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
+	} else if (folio_test_pmd_mappable(folio)) {
+		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
 		filemap_nr_thps_dec(mapping);
 	}
 
 	/*
-	 * At this point page must be either written or cleaned by
-	 * truncate. Dirty page here signals a bug and loss of
+	 * At this point folio must be either written or cleaned by
+	 * truncate. Dirty folio here signals a bug and loss of
 	 * unwritten data.
 	 *
-	 * This fixes dirty accounting after removing the page entirely
-	 * but leaves PageDirty set: it has no effect for truncated
-	 * page and anyway will be cleared before returning page into
+	 * This fixes dirty accounting after removing the folio entirely
+	 * but leaves the dirty flag set: it has no effect for truncated
+	 * folio and anyway will be cleared before returning folio to
 	 * buddy allocator.
 	 */
-	if (WARN_ON_ONCE(PageDirty(page)))
-		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
+	if (WARN_ON_ONCE(folio_test_dirty(folio)))
+		folio_account_cleaned(folio, mapping,
+					inode_to_wb(mapping->host));
 }
 
 /*
@@ -227,7 +227,7 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 
 	trace_mm_filemap_delete_from_page_cache(page);
 
-	unaccount_page_cache_page(mapping, page);
+	filemap_unaccount_folio(mapping, folio);
 	page_cache_delete(mapping, folio, shadow);
 }
 
@@ -348,7 +348,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
 
-		unaccount_page_cache_page(mapping, pvec->pages[i]);
+		filemap_unaccount_folio(mapping, page_folio(pvec->pages[i]));
 	}
 	page_cache_delete_batch(mapping, pvec);
 	xa_unlock_irq(&mapping->i_pages);
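For orientation, the two converted call sites end up invoking the new helper as sketched below. This is a reading aid reconstructed from the context lines in the hunks above, not a quotation of the tree; the local-variable setup in __delete_from_page_cache() is an assumption.

/* Single-page delete path: the function already works with the folio. */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct folio *folio = page_folio(page);		/* assumed setup */
	struct address_space *mapping = page->mapping;	/* assumed setup */

	trace_mm_filemap_delete_from_page_cache(page);

	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);
}

In the batch path, each struct page in the pagevec is converted with page_folio() right at the call site, so filemap_unaccount_folio() only ever sees folios.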