f2fs: use round_up to enhance calculation

.i_cluster_size is always a power of two, so we can use round_up() instead
of roundup() and avoid the division in the calculation.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
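
For reference, here is a minimal standalone sketch (not kernel code) of the two helpers the message compares; the macro bodies below only approximate the kernel's roundup()/round_up() definitions, reduced to plain C so the snippet compiles on its own. roundup() rounds with a divide and a multiply and works for any step, while round_up() rounds with mask arithmetic and is only correct when the step is a power of two, which i_cluster_size always is.

/*
 * Standalone sketch, not kernel code: simplified versions of the kernel's
 * roundup() and round_up() helpers.  roundup() divides and multiplies, so
 * it works for any step; round_up() relies on mask arithmetic, so it is
 * only valid for power-of-two steps (like i_cluster_size).
 */
#include <stdio.h>

#define roundup(x, y)  ((((x) + ((y) - 1)) / (y)) * (y))	/* any step */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)		/* power-of-2 step */

int main(void)
{
	unsigned long from = 5000;
	unsigned long step = 16UL << 12;	/* i_cluster_size << PAGE_SHIFT */

	/* Both round 5000 up to 65536 when the step is a power of two. */
	printf("roundup:  %lu\n", roundup(from, step));
	printf("round_up: %lu\n", round_up(from, step));
	return 0;
}

Because the step is a power of two, both macros return the same value; the mask-based round_up() simply gets there without a division.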
@@ -742,16 +742,9 @@ int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
 	 * for compressed file, only support cluster size
 	 * aligned truncation.
 	 */
-	if (f2fs_compressed_file(inode)) {
-		size_t cluster_shift = PAGE_SHIFT +
-				F2FS_I(inode)->i_log_cluster_size;
-		size_t cluster_mask = (1 << cluster_shift) - 1;
-
-		free_from = from >> cluster_shift;
-		if (from & cluster_mask)
-			free_from++;
-		free_from <<= cluster_shift;
-	}
+	if (f2fs_compressed_file(inode))
+		free_from = round_up(from,
+				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
 #endif
 
 	err = f2fs_do_truncate_blocks(inode, free_from, lock);
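
To see why the hunk above is a pure simplification, the following hypothetical test program (not part of the patch) checks that the removed shift/mask sequence and the new round_up() call compute the same cluster-aligned offset; PAGE_SHIFT and LOG_CLUSTER_SIZE are example values standing in for the per-inode fields.

/*
 * Hypothetical standalone check, not kernel code: the open-coded shift/mask
 * rounding removed above and the round_up() call added in its place agree
 * for every offset tried.
 */
#include <assert.h>
#include <stddef.h>

#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

#define PAGE_SHIFT		12
#define LOG_CLUSTER_SIZE	2			/* 4 pages per cluster */
#define CLUSTER_SIZE		(1 << LOG_CLUSTER_SIZE)

/* The logic removed by the patch: shift down, round up, shift back. */
static unsigned long long old_align(unsigned long long from)
{
	size_t cluster_shift = PAGE_SHIFT + LOG_CLUSTER_SIZE;
	size_t cluster_mask = (1ULL << cluster_shift) - 1;
	unsigned long long free_from = from >> cluster_shift;

	if (from & cluster_mask)
		free_from++;
	return free_from << cluster_shift;
}

int main(void)
{
	for (unsigned long long from = 0; from < (1ULL << 22); from += 4099)
		assert(old_align(from) ==
		       round_up(from, (unsigned long long)CLUSTER_SIZE << PAGE_SHIFT));
	return 0;
}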
@@ -3563,7 +3556,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = roundup(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, F2FS_I(inode)->i_cluster_size);
 
 		ret = release_compress_blocks(&dn, count);
@@ -3715,7 +3708,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = roundup(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, F2FS_I(inode)->i_cluster_size);
 
 		ret = reserve_compress_blocks(&dn, count);