f2fs: compress: avoid duplicate counting of valid blocks when read compressed file
Since a cluster is the basic unit of compression, a whole cluster is either compressed or it is not. We therefore only need to count valid blocks for the first page in a cluster; the remaining pages of the cluster can skip the check.

Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit a2649315bc
parent 65ddf65648
fs/f2fs/data.c

@@ -2299,6 +2299,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
                 .nr_rpages = 0,
                 .nr_cpages = 0,
         };
+        pgoff_t nc_cluster_idx = NULL_CLUSTER;
 #endif
         unsigned nr_pages = rac ? readahead_count(rac) : 1;
         unsigned max_nr_pages = nr_pages;
@@ -2331,12 +2332,23 @@ static int f2fs_mpage_readpages(struct inode *inode,
                                 if (ret)
                                         goto set_error_page;
                         }
-                        ret = f2fs_is_compressed_cluster(inode, page->index);
-                        if (ret < 0)
-                                goto set_error_page;
-                        else if (!ret)
-                                goto read_single_page;
-
+                        if (cc.cluster_idx == NULL_CLUSTER) {
+                                if (nc_cluster_idx ==
+                                        page->index >> cc.log_cluster_size) {
+                                        goto read_single_page;
+                                }
+
+                                ret = f2fs_is_compressed_cluster(inode, page->index);
+                                if (ret < 0)
+                                        goto set_error_page;
+                                else if (!ret) {
+                                        nc_cluster_idx =
+                                                page->index >> cc.log_cluster_size;
+                                        goto read_single_page;
+                                }
+
+                                nc_cluster_idx = NULL_CLUSTER;
+                        }
                         ret = f2fs_init_compress_ctx(&cc);
                         if (ret)
                                 goto set_error_page;
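The effect of the change is easier to see in isolation: nc_cluster_idx caches the index of the last cluster that f2fs_is_compressed_cluster() reported as non-compressed, so the remaining pages of that same cluster skip the metadata lookup entirely. Below is a minimal userspace sketch of that caching pattern, not the kernel code: cluster_is_compressed(), LOG_CLUSTER_SIZE, and the page loop are hypothetical stand-ins, and the real function additionally relies on cc.cluster_idx so that clusters already being read as compressed are not re-checked either.

/*
 * Sketch of the nc_cluster_idx caching idea, NOT the kernel code.
 * cluster_is_compressed() stands in for f2fs_is_compressed_cluster();
 * the lookup is skipped for every page after the first one in a
 * cluster already known to be non-compressed.
 */
#include <stdio.h>

#define LOG_CLUSTER_SIZE 2              /* 4 pages per cluster */
#define NULL_CLUSTER ((unsigned long)-1)

/* Hypothetical stand-in for the per-cluster metadata lookup. */
static int cluster_is_compressed(unsigned long cluster_idx)
{
        return cluster_idx % 2;         /* pretend odd clusters are compressed */
}

int main(void)
{
        unsigned long nc_cluster_idx = NULL_CLUSTER; /* last known non-compressed cluster */
        unsigned long lookups = 0;

        for (unsigned long page_idx = 0; page_idx < 16; page_idx++) {
                unsigned long cluster_idx = page_idx >> LOG_CLUSTER_SIZE;

                /* Fast path: cluster already known to be non-compressed. */
                if (nc_cluster_idx == cluster_idx) {
                        printf("page %2lu: cached, read as single page\n", page_idx);
                        continue;
                }

                lookups++;
                if (!cluster_is_compressed(cluster_idx)) {
                        nc_cluster_idx = cluster_idx;   /* remember the negative result */
                        printf("page %2lu: lookup, read as single page\n", page_idx);
                        continue;
                }

                nc_cluster_idx = NULL_CLUSTER;          /* invalidate the cache */
                printf("page %2lu: lookup, read as compressed cluster\n", page_idx);
        }
        printf("%lu lookups for 16 pages\n", lookups);
        return 0;
}

Note that the cache is invalidated (reset to NULL_CLUSTER) as soon as a compressed cluster is seen, mirroring the nc_cluster_idx = NULL_CLUSTER assignment in the hunk above. With four pages per cluster, as in the sketch, a sequential read of uncompressed data costs one lookup per cluster instead of one per page, i.e. a quarter of the calls.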