btrfs: convert btrfs_read_merkle_tree_page() to use a folio

Remove a number of hidden calls to compound_head() by using a folio
throughout.  Also follow core kernel coding style by adding the folio to
the page cache immediately after allocation instead of doing the read
first, then adding it to the page cache.  This ordering makes subsequent
readers block waiting for the first reader instead of duplicating the
work only to throw it away when they find out they lost the race.
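The ordering described above can be sketched as follows. This is a minimal illustration, not the patch itself: read_cached_folio() is a hypothetical helper, the data fill is elided, and error handling is trimmed to the essentials. The key point is that filemap_add_folio() publishes the folio in the page cache already locked, so a second reader that finds it via __filemap_get_folio() sleeps in folio_lock() until the filler unlocks it.

#include <linux/pagemap.h>

/* Hypothetical helper showing the insert-then-read ordering. */
static struct folio *read_cached_folio(struct address_space *mapping,
				       pgoff_t index)
{
	struct folio *folio;
	int ret;

again:
	folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
	if (!IS_ERR(folio)) {
		if (folio_test_uptodate(folio))
			return folio;
		/* Sleeps here while the inserting task still holds the lock. */
		folio_lock(folio);
		if (!folio_test_uptodate(folio)) {
			/* The filler failed; only uptodate folios are usable. */
			folio_unlock(folio);
			folio_put(folio);
			return ERR_PTR(-EIO);
		}
		folio_unlock(folio);
		return folio;
	}

	folio = filemap_alloc_folio(mapping_gfp_constraint(mapping, ~__GFP_FS), 0);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	/* Insert first: on success the folio is in the cache and locked. */
	ret = filemap_add_folio(mapping, folio, index, GFP_NOFS);
	if (ret) {
		folio_put(folio);
		if (ret == -EEXIST)	/* Lost the race; use the winner's folio. */
			goto again;
		return ERR_PTR(ret);
	}

	/* ... read the data into the folio here ... */

	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return folio;
}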

Reviewed-by: Boris Burkov <boris@bur.io>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 06ed09351b
parent 5facccc940
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   2023-08-14 18:52:08 +01:00
Committer: David Sterba <dsterba@suse.com>

diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -715,7 +715,7 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
                                               pgoff_t index,
                                               unsigned long num_ra_pages)
 {
-       struct page *page;
+       struct folio *folio;
        u64 off = (u64)index << PAGE_SHIFT;
        loff_t merkle_pos = merkle_file_pos(inode);
        int ret;
@@ -726,29 +726,36 @@ static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
                return ERR_PTR(-EFBIG);
        index += merkle_pos >> PAGE_SHIFT;
 again:
-       page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
-       if (page) {
-               if (PageUptodate(page))
-                       return page;
+       folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+       if (!IS_ERR(folio)) {
+               if (folio_test_uptodate(folio))
+                       goto out;
 
-               lock_page(page);
-               /*
-                * We only insert uptodate pages, so !Uptodate has to be
-                * an error
-                */
-               if (!PageUptodate(page)) {
-                       unlock_page(page);
-                       put_page(page);
+               folio_lock(folio);
+               /* If it's not uptodate after we have the lock, we got a read error. */
+               if (!folio_test_uptodate(folio)) {
+                       folio_unlock(folio);
+                       folio_put(folio);
                        return ERR_PTR(-EIO);
                }
-               unlock_page(page);
-               return page;
+               folio_unlock(folio);
+               goto out;
        }
 
-       page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
-       if (!page)
+       folio = filemap_alloc_folio(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS),
+                                   0);
+       if (!folio)
                return ERR_PTR(-ENOMEM);
 
+       ret = filemap_add_folio(inode->i_mapping, folio, index, GFP_NOFS);
+       if (ret) {
+               folio_put(folio);
+               /* Did someone else insert a folio here? */
+               if (ret == -EEXIST)
+                       goto again;
+               return ERR_PTR(ret);
+       }
+
        /*
         * Merkle item keys are indexed from byte 0 in the merkle tree.
         * They have the form:
@@ -756,28 +763,19 @@ again:
         * [ inode objectid, BTRFS_MERKLE_ITEM_KEY, offset in bytes ]
         */
        ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
-                            page_address(page), PAGE_SIZE, page);
+                            folio_address(folio), PAGE_SIZE, &folio->page);
        if (ret < 0) {
-               put_page(page);
+               folio_put(folio);
                return ERR_PTR(ret);
        }
        if (ret < PAGE_SIZE)
-               memzero_page(page, ret, PAGE_SIZE - ret);
+               folio_zero_segment(folio, ret, PAGE_SIZE);
 
-       SetPageUptodate(page);
-       ret = add_to_page_cache_lru(page, inode->i_mapping, index, GFP_NOFS);
-       if (!ret) {
-               /* Inserted and ready for fsverity */
-               unlock_page(page);
-       } else {
-               put_page(page);
-               /* Did someone race us into inserting this page? */
-               if (ret == -EEXIST)
-                       goto again;
-               page = ERR_PTR(ret);
-       }
-       return page;
+       folio_mark_uptodate(folio);
+       folio_unlock(folio);
+out:
+       return folio_file_page(folio, index);
 }
 
 /*
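For reference, the function keeps returning struct page * because it implements fsverity's read_merkle_tree_page hook; the new out: label bridges back from the folio with folio_file_page(), which for the order-0 folio used here resolves to &folio->page. Below is a paraphrase of the hook's shape, assuming the include/linux/fsverity.h definition current at the time of this patch; see the header for the authoritative version.

struct fsverity_operations {
	/* ... other hooks elided ... */
	struct page *(*read_merkle_tree_page)(struct inode *inode,
					      pgoff_t index,
					      unsigned long num_ra_pages);
};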