f2fs: change to use rwsem for gc_mutex

A mutex won't serialize callers fairly, so an unlucky caller can starve
waiting for the lock; let's use an rwsem lock (taken for write) instead.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Author:    Chao Yu, 2020-01-14 19:36:50 +08:00
Committer: Jaegeuk Kim
Commit:    fb24fea75c (parent d7b0a23d81)
5 changed files with 27 additions and 24 deletions
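
For readers less familiar with the kernel locking API, here is a minimal
illustrative sketch (not part of the patch) of the conversion pattern applied
mechanically throughout the diff below: mutex_init() -> init_rwsem(),
mutex_lock() -> down_write(), mutex_trylock() -> down_write_trylock(),
mutex_unlock() -> up_write(). Callers remain mutually exclusive, but contended
writers queue on the semaphore's wait list. The example_gc_lock name and the
two helper functions are hypothetical stand-ins for sbi->gc_lock and the GC
paths.

#include <linux/rwsem.h>

/* Hypothetical stand-in for sbi->gc_lock; for illustration only. */
static DECLARE_RWSEM(example_gc_lock);

/* Blocking path: replaces mutex_lock()/mutex_unlock() on gc_mutex. */
static void example_foreground_gc(void)
{
	down_write(&example_gc_lock);	/* exclusive; contended callers queue */
	/* ... foreground GC or checkpoint work ... */
	up_write(&example_gc_lock);
}

/* Opportunistic path: replaces mutex_trylock(); skip rather than block. */
static bool example_background_gc(void)
{
	if (!down_write_trylock(&example_gc_lock))
		return false;		/* lock is busy: skip this GC round */
	/* ... background GC work ... */
	up_write(&example_gc_lock);
	return true;
}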

--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1391,7 +1391,10 @@ struct f2fs_sb_info {
 	struct f2fs_mount_info mount_opt;	/* mount options */
 
 	/* for cleaning operations */
-	struct mutex gc_mutex;			/* mutex for GC */
+	struct rw_semaphore gc_lock;		/*
+						 * semaphore for GC, avoid
+						 * race between GC and GC or CP
+						 */
 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
 	unsigned int cur_victim_sec;		/* current victim section num */
 	unsigned int gc_mode;			/* current GC state */

--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1642,7 +1642,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 next_alloc:
 	if (has_not_enough_free_secs(sbi, 0,
 		GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
 		if (err && err != -ENODATA && err != -EAGAIN)
 			goto out_err;
@@ -2450,12 +2450,12 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 		return ret;
 
 	if (!sync) {
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
 	} else {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 	}
 
 	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
@@ -2493,12 +2493,12 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
 
 do_more:
 	if (!range.sync) {
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
 	} else {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 	}
 
 	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
@@ -2929,7 +2929,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 	end_segno = min(start_segno + range.segments, dev_end_segno);
 
 	while (start_segno < end_segno) {
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}

--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,18 +78,18 @@ static int gc_thread_func(void *data)
 		 */
 		if (sbi->gc_mode == GC_URGENT) {
 			wait_ms = gc_th->urgent_sleep_time;
-			mutex_lock(&sbi->gc_mutex);
+			down_write(&sbi->gc_lock);
 			goto do_gc;
 		}
 
-		if (!mutex_trylock(&sbi->gc_mutex)) {
+		if (!down_write_trylock(&sbi->gc_lock)) {
 			stat_other_skip_bggc_count(sbi);
 			goto next;
 		}
 
 		if (!is_idle(sbi, GC_TIME)) {
 			increase_sleep_time(gc_th, &wait_ms);
-			mutex_unlock(&sbi->gc_mutex);
+			up_write(&sbi->gc_lock);
 			stat_io_skip_bggc_count(sbi);
 			goto next;
 		}
@@ -1370,7 +1370,7 @@ stop:
 				reserved_segments(sbi),
 				prefree_segments(sbi));
 
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 
 	put_gc_inode(&gc_list);
 
@@ -1409,9 +1409,9 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
 			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
 		};
 
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		do_garbage_collect(sbi, segno, &gc_list, FG_GC);
-		mutex_unlock(&sbi->gc_mutex);
+		up_write(&sbi->gc_lock);
 		put_gc_inode(&gc_list);
 
 		if (get_valid_blocks(sbi, segno, true))

--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -504,7 +504,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	 * dir/node pages without enough free segments.
 	 */
 	if (has_not_enough_free_secs(sbi, 0, 0)) {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		f2fs_gc(sbi, false, false, NULL_SEGNO);
 	}
 }
@@ -2860,9 +2860,9 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	if (sbi->discard_blks == 0)
 		goto out;
 
-	mutex_lock(&sbi->gc_mutex);
+	down_write(&sbi->gc_lock);
 	err = f2fs_write_checkpoint(sbi, &cpc);
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 	if (err)
 		goto out;
 

--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1238,9 +1238,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
 		cpc.reason = __get_cp_reason(sbi);
 
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		err = f2fs_write_checkpoint(sbi, &cpc);
-		mutex_unlock(&sbi->gc_mutex);
+		up_write(&sbi->gc_lock);
 	}
 	f2fs_trace_ios(NULL, 1);
 
@@ -1621,7 +1621,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	f2fs_update_time(sbi, DISABLE_TIME);
 
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
-		mutex_lock(&sbi->gc_mutex);
+		down_write(&sbi->gc_lock);
 		err = f2fs_gc(sbi, true, false, NULL_SEGNO);
 		if (err == -ENODATA) {
 			err = 0;
@@ -1643,7 +1643,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 		goto restore_flag;
 	}
 
-	mutex_lock(&sbi->gc_mutex);
+	down_write(&sbi->gc_lock);
 	cpc.reason = CP_PAUSE;
 	set_sbi_flag(sbi, SBI_CP_DISABLED);
 	err = f2fs_write_checkpoint(sbi, &cpc);
@@ -1655,7 +1655,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	spin_unlock(&sbi->stat_lock);
 
 out_unlock:
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 restore_flag:
 	sbi->sb->s_flags = s_flags;	/* Restore MS_RDONLY status */
 	return err;
@@ -1663,12 +1663,12 @@ restore_flag:
 
 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
 {
-	mutex_lock(&sbi->gc_mutex);
+	down_write(&sbi->gc_lock);
 	f2fs_dirty_to_prefree(sbi);
 
 	clear_sbi_flag(sbi, SBI_CP_DISABLED);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
-	mutex_unlock(&sbi->gc_mutex);
+	up_write(&sbi->gc_lock);
 
 	f2fs_sync_fs(sbi->sb, 1);
 }
@@ -3398,7 +3398,7 @@ try_onemore:
 
 	/* init f2fs-specific super block info */
 	sbi->valid_super_block = valid_super_block;
-	mutex_init(&sbi->gc_mutex);
+	init_rwsem(&sbi->gc_lock);
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	mutex_init(&sbi->resize_mutex);