
f2fs: Convert to release_folio

While converting f2fs_release_page() to f2fs_release_folio(), cache the
sb_info so we don't need to retrieve it twice, and remove the redundant
call to set_page_private().  The use of folios should be pushed further
into f2fs from here.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Matthew Wilcox (Oracle), 2022-04-30 23:41:46 -04:00
commit c26cd04586, parent 3c402f1543
5 changed files with 21 additions and 19 deletions
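
For readers unfamiliar with the new hook, here is a minimal sketch (not part of this patch) of how a hypothetical filesystem "myfs" might wire up ->release_folio; the names myfs_release_folio and myfs_aops are invented for illustration. It shows the calling convention this commit adopts for f2fs: the callback receives a struct folio and returns bool (true once any private data has been dropped and the folio may be reclaimed, false to keep it), replacing the old int-returning ->releasepage.

/*
 * Minimal sketch, assuming a hypothetical filesystem "myfs":
 * the new ->release_folio hook takes a struct folio and returns bool,
 * instead of the old ->releasepage taking a struct page and returning int.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	/* A dirty folio still needs writeback; keep its private data. */
	if (folio_test_dirty(folio))
		return false;

	/* Drop whatever ->private carries so the folio can be reclaimed. */
	folio_detach_private(folio);
	return true;
}

static const struct address_space_operations myfs_aops = {
	.release_folio	= myfs_release_folio,
	/* ... other address space operations ... */
};

The f2fs conversion below follows the same pattern, additionally clearing its atomic-write and compressed-cache private state before detaching the folio's private data.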

fs/f2fs/checkpoint.c

@@ -468,7 +468,7 @@ const struct address_space_operations f2fs_meta_aops = {
 	.writepages	= f2fs_write_meta_pages,
 	.dirty_folio	= f2fs_dirty_meta_folio,
 	.invalidate_folio = f2fs_invalidate_folio,
-	.releasepage	= f2fs_release_page,
+	.release_folio	= f2fs_release_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= f2fs_migrate_page,
 #endif

fs/f2fs/compress.c

@@ -1746,7 +1746,7 @@ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
 }
 
 const struct address_space_operations f2fs_compress_aops = {
-	.releasepage = f2fs_release_page,
+	.release_folio = f2fs_release_folio,
 	.invalidate_folio = f2fs_invalidate_folio,
 };
 

fs/f2fs/data.c

@@ -3528,28 +3528,30 @@ void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 	folio_detach_private(folio);
 }
 
-int f2fs_release_page(struct page *page, gfp_t wait)
+bool f2fs_release_folio(struct folio *folio, gfp_t wait)
 {
-	/* If this is dirty page, keep PagePrivate */
-	if (PageDirty(page))
-		return 0;
+	struct f2fs_sb_info *sbi;
+
+	/* If this is dirty folio, keep private data */
+	if (folio_test_dirty(folio))
+		return false;
 
 	/* This is atomic written page, keep Private */
-	if (page_private_atomic(page))
-		return 0;
+	if (page_private_atomic(&folio->page))
+		return false;
 
-	if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
-		struct inode *inode = page->mapping->host;
+	sbi = F2FS_M_SB(folio->mapping);
+	if (test_opt(sbi, COMPRESS_CACHE)) {
+		struct inode *inode = folio->mapping->host;
 
-		if (inode->i_ino == F2FS_COMPRESS_INO(F2FS_I_SB(inode)))
-			clear_page_private_data(page);
+		if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
+			clear_page_private_data(&folio->page);
 	}
 
-	clear_page_private_gcing(page);
+	clear_page_private_gcing(&folio->page);
 
-	detach_page_private(page);
-	set_page_private(page, 0);
-	return 1;
+	folio_detach_private(folio);
+	return true;
 }
 
 static bool f2fs_dirty_data_folio(struct address_space *mapping,
@@ -3944,7 +3946,7 @@ const struct address_space_operations f2fs_dblock_aops = {
 	.write_end	= f2fs_write_end,
 	.dirty_folio	= f2fs_dirty_data_folio,
 	.invalidate_folio = f2fs_invalidate_folio,
-	.releasepage	= f2fs_release_page,
+	.release_folio	= f2fs_release_folio,
 	.direct_IO	= noop_direct_IO,
 	.bmap		= f2fs_bmap,
 	.swap_activate  = f2fs_swap_activate,

fs/f2fs/f2fs.h

@@ -3768,7 +3768,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 				int compr_blocks, bool allow_balance);
 void f2fs_write_failed(struct inode *inode, loff_t to);
 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int f2fs_release_page(struct page *page, gfp_t wait);
+bool f2fs_release_folio(struct folio *folio, gfp_t wait);
 #ifdef CONFIG_MIGRATION
 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
 			struct page *page, enum migrate_mode mode);

fs/f2fs/node.c

@@ -2165,7 +2165,7 @@ const struct address_space_operations f2fs_node_aops = {
 	.writepages	= f2fs_write_node_pages,
 	.dirty_folio	= f2fs_dirty_node_folio,
 	.invalidate_folio = f2fs_invalidate_folio,
-	.releasepage	= f2fs_release_page,
+	.release_folio	= f2fs_release_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= f2fs_migrate_page,
 #endif