f2fs: change i_compr_blocks of inode to atomic value
writepages() can be invoked concurrently for the same file by different threads, such as a thread fsyncing the file and a kworker kernel thread, so updating i_compr_blocks without protection is racy. Protect it by making the field an atomic value. In addition, i_compr_blocks does not need 64 bits, so use a plain atomic_t rather than atomic64_t.

Signed-off-by: Daeho Jeong <daehojeong@google.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 32c0fec1aa
commit c2759ebaf7
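To make the race concrete, here is a minimal userspace sketch (not f2fs code); it uses pthreads and C11 atomics as stand-ins for the kernel's atomic_t/atomic_add(), and shows how two writepages()-style updaters lose increments on a plain counter but not on an atomic one:

/*
 * Hypothetical illustration only: two threads bump a shared
 * "compressed blocks" counter, once via a plain integer (racy
 * read-modify-write) and once via a C11 atomic (safe).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static unsigned long long plain_compr_blocks;	/* like the old u64 field */
static atomic_uint atomic_compr_blocks;		/* like the new atomic_t field */

static void *writer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		plain_compr_blocks += 1;			/* updates can be lost */
		atomic_fetch_add(&atomic_compr_blocks, 1);	/* never loses updates */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, writer, NULL);
	pthread_create(&b, NULL, writer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("plain:  %llu\n", plain_compr_blocks);
	printf("atomic: %u\n", atomic_load(&atomic_compr_blocks));
	return 0;
}

Built with, say, gcc -O2 -pthread race_sketch.c, the plain counter typically falls short of 2000000 while the atomic counter always reaches it, which is the same class of lost update the patch below avoids for i_compr_blocks.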
fs/f2fs/f2fs.h
@@ -807,7 +807,7 @@ struct f2fs_inode_info {
 	struct timespec64 i_disk_time[4];	/* inode disk times */
 
 	/* for file compress */
-	u64 i_compr_blocks;			/* # of compressed blocks */
+	atomic_t i_compr_blocks;		/* # of compressed blocks */
 	unsigned char i_compress_algorithm;	/* algorithm type */
 	unsigned char i_log_cluster_size;	/* log of cluster size */
 	unsigned int i_cluster_size;		/* cluster size */
@@ -3963,17 +3963,19 @@ static inline void set_compress_context(struct inode *inode)
 	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
-static inline u64 f2fs_disable_compressed_file(struct inode *inode)
+static inline u32 f2fs_disable_compressed_file(struct inode *inode)
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
+	u32 i_compr_blocks;
 
 	if (!f2fs_compressed_file(inode))
 		return 0;
 	if (S_ISREG(inode->i_mode)) {
 		if (get_dirty_pages(inode))
 			return 1;
-		if (fi->i_compr_blocks)
-			return fi->i_compr_blocks;
+		i_compr_blocks = atomic_read(&fi->i_compr_blocks);
+		if (i_compr_blocks)
+			return i_compr_blocks;
 	}
 
 	fi->i_flags &= ~F2FS_COMPR_FL;
@@ -4090,16 +4092,17 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode,
 						u64 blocks, bool add)
 {
 	int diff = F2FS_I(inode)->i_cluster_size - blocks;
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 
 	/* don't update i_compr_blocks if saved blocks were released */
-	if (!add && !F2FS_I(inode)->i_compr_blocks)
+	if (!add && !atomic_read(&fi->i_compr_blocks))
 		return;
 
 	if (add) {
-		F2FS_I(inode)->i_compr_blocks += diff;
+		atomic_add(diff, &fi->i_compr_blocks);
 		stat_add_compr_blocks(inode, diff);
 	} else {
-		F2FS_I(inode)->i_compr_blocks -= diff;
+		atomic_sub(diff, &fi->i_compr_blocks);
 		stat_sub_compr_blocks(inode, diff);
 	}
 	f2fs_mark_inode_dirty_sync(inode, true);
fs/f2fs/file.c
@@ -545,7 +545,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 	bool compressed_cluster = false;
 	int cluster_index = 0, valid_blocks = 0;
 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
-	bool released = !F2FS_I(dn->inode)->i_compr_blocks;
+	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
 
 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
 		base = get_extra_isize(dn->inode);
@@ -3424,7 +3424,7 @@ static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
 	if (!f2fs_compressed_file(inode))
 		return -EINVAL;
 
-	blocks = F2FS_I(inode)->i_compr_blocks;
+	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
 	return put_user(blocks, (u64 __user *)arg);
 }
 
@@ -3528,7 +3528,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 	inode->i_ctime = current_time(inode);
 	f2fs_mark_inode_dirty_sync(inode, true);
 
-	if (!F2FS_I(inode)->i_compr_blocks)
+	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
 		goto out;
 
 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -3576,14 +3576,15 @@ out:
 
 	if (ret >= 0) {
 		ret = put_user(released_blocks, (u64 __user *)arg);
-	} else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
+	} else if (released_blocks &&
+			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
-			"iblocks=%llu, released=%u, compr_blocks=%llu, "
+			"iblocks=%llu, released=%u, compr_blocks=%u, "
 			"run fsck to fix.",
 			__func__, inode->i_ino, inode->i_blocks,
 			released_blocks,
-			F2FS_I(inode)->i_compr_blocks);
+			atomic_read(&F2FS_I(inode)->i_compr_blocks));
 	}
 
 	return ret;
@@ -3671,7 +3672,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 	if (ret)
 		return ret;
 
-	if (F2FS_I(inode)->i_compr_blocks)
+	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
 		goto out;
 
 	f2fs_balance_fs(F2FS_I_SB(inode), true);
@@ -3735,14 +3736,15 @@ out:
 
 	if (ret >= 0) {
 		ret = put_user(reserved_blocks, (u64 __user *)arg);
-	} else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
+	} else if (reserved_blocks &&
+			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
-			"iblocks=%llu, reserved=%u, compr_blocks=%llu, "
+			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
 			"run fsck to fix.",
 			__func__, inode->i_ino, inode->i_blocks,
 			reserved_blocks,
-			F2FS_I(inode)->i_compr_blocks);
+			atomic_read(&F2FS_I(inode)->i_compr_blocks));
 	}
 
 	return ret;
fs/f2fs/inode.c
@@ -442,7 +442,8 @@ static int do_read_inode(struct inode *inode)
 					(fi->i_flags & F2FS_COMPR_FL)) {
 		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
 					i_log_cluster_size)) {
-			fi->i_compr_blocks = le64_to_cpu(ri->i_compr_blocks);
+			atomic_set(&fi->i_compr_blocks,
+					le64_to_cpu(ri->i_compr_blocks));
 			fi->i_compress_algorithm = ri->i_compress_algorithm;
 			fi->i_log_cluster_size = ri->i_log_cluster_size;
 			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
@@ -460,7 +461,7 @@ static int do_read_inode(struct inode *inode)
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
 	stat_inc_compr_inode(inode);
-	stat_add_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
+	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));
 
 	return 0;
 }
@@ -619,7 +620,8 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
 			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
 							i_log_cluster_size)) {
 			ri->i_compr_blocks =
-				cpu_to_le64(F2FS_I(inode)->i_compr_blocks);
+				cpu_to_le64(atomic_read(
+					&F2FS_I(inode)->i_compr_blocks));
 			ri->i_compress_algorithm =
 				F2FS_I(inode)->i_compress_algorithm;
 			ri->i_log_cluster_size =
@@ -768,7 +770,8 @@ no_delete:
 	stat_dec_inline_dir(inode);
 	stat_dec_inline_inode(inode);
 	stat_dec_compr_inode(inode);
-	stat_sub_compr_blocks(inode, F2FS_I(inode)->i_compr_blocks);
+	stat_sub_compr_blocks(inode,
+			atomic_read(&F2FS_I(inode)->i_compr_blocks));
 
 	if (likely(!f2fs_cp_error(sbi) &&
 			!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
fs/f2fs/super.c
@@ -1026,6 +1026,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 
 	/* Initialize f2fs-specific inode info */
 	atomic_set(&fi->dirty_pages, 0);
+	atomic_set(&fi->i_compr_blocks, 0);
 	init_rwsem(&fi->i_sem);
 	spin_lock_init(&fi->i_size_lock);
 	INIT_LIST_HEAD(&fi->dirty_list);