Merge patch series "Filesystem page flags cleanup"

Matthew Wilcox (Oracle) <willy@infradead.org> says:

The first four patches continue the work begun in 02e1960aaf to make
the mappedtodisk/owner_2 flag available to filesystems which don't use
buffer heads. The last two remove uses of Private2 (we're achingly close
to being rid of it entirely, but that doesn't seem like it'll land this
merge window).
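
To illustrate what "available to filesystems" means in practice: a
filesystem that does not depend on buffer heads can wrap the owner_2
folio flag in accessors of its own, the same way btrfs wraps its
Ordered state below. A minimal sketch (the "frozen" name is purely
hypothetical and not part of this series):

    /* Hypothetical filesystem-private use of the owner_2 folio flag. */
    #define folio_test_frozen(folio)   folio_test_owner_2(folio)
    #define folio_set_frozen(folio)    folio_set_owner_2(folio)
    #define folio_clear_frozen(folio)  folio_clear_owner_2(folio)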

* patches from https://lore.kernel.org/r/20241002040111.1023018-1-willy@infradead.org:
  migrate: Remove references to Private2
  ceph: Remove call to PagePrivate2()
  btrfs: Switch from using the private_2 flag to owner_2
  mm: Remove PageMappedToDisk
  nilfs2: Convert nilfs_copy_buffer() to use folios
  fs: Move clearing of mappedtodisk to buffer.c

Link: https://lore.kernel.org/r/20241002040111.1023018-1-willy@infradead.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit c6bbfc7ce1
Committed by Christian Brauner, 2024-10-04 09:24:28 +02:00
9 changed files with 36 additions and 41 deletions


@@ -744,16 +744,11 @@ const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);
/*
- * We use page status Private2 to indicate there is an ordered extent with
+ * We use folio flag owner_2 to indicate there is an ordered extent with
* unfinished IO.
- *
- * Rename the Private2 accessors to Ordered, to improve readability.
*/
- #define PageOrdered(page) PagePrivate2(page)
- #define SetPageOrdered(page) SetPagePrivate2(page)
- #define ClearPageOrdered(page) ClearPagePrivate2(page)
- #define folio_test_ordered(folio) folio_test_private_2(folio)
- #define folio_set_ordered(folio) folio_set_private_2(folio)
- #define folio_clear_ordered(folio) folio_clear_private_2(folio)
+ #define folio_test_ordered(folio) folio_test_owner_2(folio)
+ #define folio_set_ordered(folio) folio_set_owner_2(folio)
+ #define folio_clear_ordered(folio) folio_clear_owner_2(folio)
#endif
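
The accessors keep their meaning; only the backing flag changes. The
comments touched below describe the flag's lifecycle: it is set while an
ordered extent still has unfinished IO, and whoever finds it set (endio
or invalidate_folio) clears it and does the accounting exactly once. A
simplified sketch of that test-and-clear step (illustration only, not
code from the patch; real callers go through the subpage-aware
btrfs_folio_test_ordered() and friends):

    /* Illustration: decide whether we still own the accounting for this folio. */
    static bool example_take_ordered(struct folio *folio)
    {
            if (!folio_test_ordered(folio))
                    return false;           /* endio already handled this range */
            folio_clear_ordered(folio);     /* do the accounting exactly once */
            return true;
    }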


@@ -1513,7 +1513,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* (which the caller expects to stay locked), don't clear any
* dirty bits and don't set any writeback bits
*
- * Do set the Ordered (Private2) bit so we know this page was
+ * Do set the Ordered flag so we know this page was
* properly setup for writepage.
*/
page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
@@ -7292,7 +7292,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
*
* But already submitted bio can still be finished on this folio.
* Furthermore, endio function won't skip folio which has Ordered
- * (Private2) already cleared, so it's possible for endio and
+ * already cleared, so it's possible for endio and
* invalidate_folio to do the same ordered extent accounting twice
* on one folio.
*
@@ -7358,7 +7358,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
range_len = range_end + 1 - cur;
if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
/*
- * If Ordered (Private2) is cleared, it means endio has
+ * If Ordered is cleared, it means endio has
* already been executed for the range.
* We can't delete the extent states as
* btrfs_finish_ordered_io() may still use some of them.
@@ -7431,7 +7431,7 @@ next:
}
/*
* We have iterated through all ordered extents of the page, the page
- * should not have Ordered (Private2) anymore, or the above iteration
+ * should not have Ordered anymore, or the above iteration
* did something wrong.
*/
ASSERT(!folio_test_ordered(folio));


@@ -346,10 +346,10 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
ASSERT(file_offset + len <= folio_pos(folio) + folio_size(folio));
/*
- * Ordered (Private2) bit indicates whether we still have
+ * Ordered flag indicates whether we still have
* pending io unfinished for the ordered extent.
*
- * If there's no such bit, we need to skip to next range.
+ * If it's not set, we need to skip to next range.
*/
if (!btrfs_folio_test_ordered(fs_info, folio, file_offset, len))
return false;


@@ -1649,6 +1649,7 @@ void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
if (length == folio_size(folio))
filemap_release_folio(folio, 0);
out:
+ folio_clear_mappedtodisk(folio);
return;
}
EXPORT_SYMBOL(block_invalidate_folio);


@@ -1051,7 +1051,9 @@ get_more_pages:
if (!nr_folios && !locked_pages)
break;
for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
- page = &fbatch.folios[i]->page;
+ struct folio *folio = fbatch.folios[i];
+ page = &folio->page;
doutc(cl, "? %p idx %lu\n", page, page->index);
if (locked_pages == 0)
lock_page(page); /* first page */
@@ -1078,8 +1080,6 @@ get_more_pages:
continue;
}
if (page_offset(page) >= ceph_wbc.i_size) {
- struct folio *folio = page_folio(page);
doutc(cl, "folio at %lu beyond eof %llu\n",
folio->index, ceph_wbc.i_size);
if ((ceph_wbc.size_stable ||
@@ -1095,16 +1095,16 @@ get_more_pages:
unlock_page(page);
break;
}
- if (PageWriteback(page) ||
- PagePrivate2(page) /* [DEPRECATED] */) {
+ if (folio_test_writeback(folio) ||
+ folio_test_private_2(folio) /* [DEPRECATED] */) {
if (wbc->sync_mode == WB_SYNC_NONE) {
- doutc(cl, "%p under writeback\n", page);
- unlock_page(page);
+ doutc(cl, "%p under writeback\n", folio);
+ folio_unlock(folio);
continue;
}
doutc(cl, "waiting on writeback %p\n", page);
wait_on_page_writeback(page);
folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
doutc(cl, "waiting on writeback %p\n", folio);
folio_wait_writeback(folio);
folio_wait_private_2(folio); /* [DEPRECATED] */
}
if (!clear_page_dirty_for_io(page)) {


@@ -98,16 +98,16 @@ void nilfs_forget_buffer(struct buffer_head *bh)
*/
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
- void *kaddr0, *kaddr1;
+ void *saddr, *daddr;
unsigned long bits;
- struct page *spage = sbh->b_page, *dpage = dbh->b_page;
+ struct folio *sfolio = sbh->b_folio, *dfolio = dbh->b_folio;
struct buffer_head *bh;
- kaddr0 = kmap_local_page(spage);
- kaddr1 = kmap_local_page(dpage);
- memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
- kunmap_local(kaddr1);
- kunmap_local(kaddr0);
+ saddr = kmap_local_folio(sfolio, bh_offset(sbh));
+ daddr = kmap_local_folio(dfolio, bh_offset(dbh));
+ memcpy(daddr, saddr, sbh->b_size);
+ kunmap_local(daddr);
+ kunmap_local(saddr);
dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
dbh->b_blocknr = sbh->b_blocknr;
@@ -121,13 +121,13 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
unlock_buffer(bh);
}
if (bits & BIT(BH_Uptodate))
- SetPageUptodate(dpage);
+ folio_mark_uptodate(dfolio);
else
- ClearPageUptodate(dpage);
+ folio_clear_uptodate(dfolio);
if (bits & BIT(BH_Mapped))
- SetPageMappedToDisk(dpage);
+ folio_set_mappedtodisk(dfolio);
else
- ClearPageMappedToDisk(dpage);
+ folio_clear_mappedtodisk(dfolio);
}
/**
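
The conversion works because kmap_local_folio() takes an offset into the
folio and returns a mapping of that exact byte, so the bh_offset()
arithmetic moves into the mapping call. The same pattern in isolation (a
sketch; note the mapping only covers the page containing the offset, so
each copied range must stay within one page):

    /* Sketch: copy @len bytes from one folio to another via local mappings. */
    static void example_copy_folio_bytes(struct folio *dst, size_t dst_off,
                                         struct folio *src, size_t src_off,
                                         size_t len)
    {
            void *saddr = kmap_local_folio(src, src_off);
            void *daddr = kmap_local_folio(dst, dst_off);

            memcpy(daddr, saddr, len);
            kunmap_local(daddr);
            kunmap_local(saddr);
    }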


@@ -543,7 +543,7 @@ FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
* - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
PAGEFLAG(Private, private, PF_ANY)
- PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
+ FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)
/* owner_2 can be set on tail pages for anon memory */
FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
@@ -554,7 +554,7 @@ FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
*/
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
- PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
+ FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
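
For reference, FOLIO_FLAG(name, FOLIO_HEAD_PAGE) generates only the folio
variants of the accessors, which is why PageMappedToDisk() and the
PagePrivate2() family go away while the folio_* calls used elsewhere in
this series keep working. Roughly, the mappedtodisk line now expands to
(prototypes only, to show the generated names):

    /* Generated by FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE), roughly: */
    bool folio_test_mappedtodisk(const struct folio *folio);
    void folio_set_mappedtodisk(struct folio *folio);
    void folio_clear_mappedtodisk(struct folio *folio);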


@@ -472,7 +472,7 @@ static int folio_expected_refs(struct address_space *mapping,
* The number of remaining references must be:
* 1 for anonymous folios without a mapping
* 2 for folios with a mapping
- * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
+ * 3 for folios with a mapping and the private flag set.
*/
static int __folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int expected_count)
@@ -786,7 +786,7 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
* @mode: How to migrate the page.
*
* Common logic to directly migrate a single LRU folio suitable for
- * folios that do not use PagePrivate/PagePrivate2.
+ * folios that do not have private data.
*
* Folios are locked upon entry and exit.
*/


@@ -166,7 +166,6 @@ static void truncate_cleanup_folio(struct folio *folio)
* Hence dirty accounting check is placed after invalidation.
*/
folio_cancel_dirty(folio);
- folio_clear_mappedtodisk(folio);
}
int truncate_inode_folio(struct address_space *mapping, struct folio *folio)