btrfs: convert btrfs_mark_ordered_io_finished() to take a folio

We only need a folio now, so make the function take a folio as an
argument and update all of the callers.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Josef Bacik 2024-07-24 15:57:10 -04:00 committed by David Sterba
parent aef665d69a
commit a79228011c
4 changed files with 14 additions and 14 deletions

View File

@ -1428,8 +1428,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
u32 iosize;
if (cur >= i_size) {
btrfs_mark_ordered_io_finished(inode, &folio->page, cur,
len, true);
btrfs_mark_ordered_io_finished(inode, folio, cur, len,
true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
@ -1568,7 +1568,7 @@ done:
folio_end_writeback(folio);
}
if (ret) {
btrfs_mark_ordered_io_finished(BTRFS_I(inode), &folio->page,
btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
page_start, PAGE_SIZE, !ret);
mapping_set_error(folio->mapping, ret);
}
@ -2330,7 +2330,7 @@ void extent_write_locked_range(struct inode *inode, const struct page *locked_pa
btrfs_folio_clear_writeback(fs_info, folio, cur, cur_len);
}
if (ret) {
btrfs_mark_ordered_io_finished(BTRFS_I(inode), &folio->page,
btrfs_mark_ordered_io_finished(BTRFS_I(inode), folio,
cur, cur_len, !ret);
mapping_set_error(mapping, ret);
}

View File

@ -1144,7 +1144,8 @@ static void submit_uncompressed_range(struct btrfs_inode *inode,
set_page_writeback(locked_page);
end_page_writeback(locked_page);
btrfs_mark_ordered_io_finished(inode, locked_page,
btrfs_mark_ordered_io_finished(inode,
page_folio(locked_page),
page_start, PAGE_SIZE,
!ret);
mapping_set_error(locked_page->mapping, ret);
@ -2802,8 +2803,8 @@ out_page:
* to reflect the errors and clean the page.
*/
mapping_set_error(page->mapping, ret);
btrfs_mark_ordered_io_finished(inode, page, page_start,
PAGE_SIZE, !ret);
btrfs_mark_ordered_io_finished(inode, page_folio(page),
page_start, PAGE_SIZE, !ret);
clear_page_dirty_for_io(page);
}
btrfs_folio_clear_checked(fs_info, page_folio(page), page_start, PAGE_SIZE);

View File

@ -449,8 +449,8 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
/*
* Mark all ordered extents io inside the specified range finished.
*
* @page: The involved page for the operation.
* For uncompressed buffered IO, the page status also needs to be
* @folio: The involved folio for the operation.
* For uncompressed buffered IO, the folio status also needs to be
* updated to indicate whether the pending ordered io is finished.
* Can be NULL for direct IO and compressed write.
* For these cases, callers are ensured they won't execute the
@ -460,7 +460,7 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
* extent(s) covering it.
*/
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
struct page *page, u64 file_offset,
struct folio *folio, u64 file_offset,
u64 num_bytes, bool uptodate)
{
struct rb_node *node;
@ -524,8 +524,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
ASSERT(end + 1 - cur < U32_MAX);
len = end + 1 - cur;
if (can_finish_ordered_extent(entry, page_folio(page), cur, len,
uptodate)) {
if (can_finish_ordered_extent(entry, folio, cur, len, uptodate)) {
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
btrfs_queue_ordered_fn(entry);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);

View File

@ -166,8 +166,8 @@ void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
struct folio *folio, u64 file_offset, u64 len,
bool uptodate);
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
struct page *page, u64 file_offset,
u64 num_bytes, bool uptodate);
struct folio *folio, u64 file_offset,
u64 num_bytes, bool uptodate);
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);