f2fs: check free_sections for defragmentation

Fix a wrong condition check used when defragmenting a file: the number of
sections the defragmented range will consume must be passed to
has_not_enough_free_secs() as extra demand (needed), not as sections about
to be freed.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit 7f3037a5ec
parent ed214a1183
Author: Jaegeuk Kim <jaegeuk@kernel.org>
Date:   2016-09-01 12:02:51 -07:00

5 changed files with 15 additions and 14 deletions
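
The patch extends has_not_enough_free_secs() with a third argument, needed, so a caller can ask whether enough free sections would remain after it reserves needed additional sections for an upcoming operation; existing callers pass 0 and keep their old behaviour, while the defragmenter passes the number of sections it is about to consume. Below is a minimal user-space sketch of the patched predicate; struct fake_sbi and all numbers are stand-ins for illustration, not the real f2fs types.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the few f2fs_sb_info quantities the check depends on. */
struct fake_sbi {
	int free_secs;      /* free_sections(sbi) */
	int node_secs;      /* sections pinned by dirty node pages */
	int dent_secs;      /* sections pinned by dirty dentry pages */
	int reserved_secs;  /* reserved_sections(sbi) */
};

/*
 * Mirror of the patched predicate: "freed" sections are about to be
 * released, "needed" sections are about to be consumed.
 */
static bool has_not_enough_free_secs(const struct fake_sbi *sbi,
				     int freed, int needed)
{
	return (sbi->free_secs + freed) <=
		(sbi->node_secs + 2 * sbi->dent_secs +
		 sbi->reserved_secs + needed);
}

int main(void)
{
	struct fake_sbi sbi = {
		.free_secs = 20, .node_secs = 2,
		.dent_secs = 1, .reserved_secs = 10,
	};

	/* Pre-existing call sites simply pass needed = 0. */
	printf("low on space now?          %d\n",
	       has_not_enough_free_secs(&sbi, 0, 0));
	/* Defragmentation asks for 8 more sections up front. */
	printf("low on space after defrag? %d\n",
	       has_not_enough_free_secs(&sbi, 0, 8));
	return 0;
}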

--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1293,7 +1293,7 @@ write:
 
 	if (!wbc->for_reclaim)
 		need_balance_fs = true;
-	else if (has_not_enough_free_secs(sbi, 0))
+	else if (has_not_enough_free_secs(sbi, 0, 0))
 		goto redirty_out;
 
 	err = -EAGAIN;
@@ -1625,7 +1625,7 @@ repeat:
 	if (err)
 		goto fail;
 
-	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
+	if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
 		unlock_page(page);
 		f2fs_balance_fs(sbi, true);
 		lock_page(page);

--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1961,7 +1961,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
 	 * avoid defragment running in SSR mode when free section are allocated
 	 * intensively
 	 */
-	if (has_not_enough_free_secs(sbi, sec_num)) {
+	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
 		err = -EAGAIN;
 		goto out;
 	}
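
The hunk above is the core of the fix: the old call passed sec_num in the "freed" slot, which made the check weaker as the range grew, instead of reserving space for it. A hedged sketch of the intended call pattern, reusing the toy model near the top; sections_for_blocks(), defrag_space_check() and the numbers are illustrative stand-ins, not the kernel code.

#include <errno.h>

/* Hypothetical helper: how many sections a defrag range would consume. */
static int sections_for_blocks(int blocks, int blks_per_sec)
{
	return (blocks + blks_per_sec - 1) / blks_per_sec;	/* round up */
}

/*
 * Intended pattern after the fix: nothing is freed (freed = 0), but the
 * sections the range will consume must still be reservable
 * (needed = sec_num); otherwise bail out with -EAGAIN instead of letting
 * allocation fall back to SSR mid-defragmentation.
 */
static int defrag_space_check(const struct fake_sbi *sbi,
			      int range_blocks, int blks_per_sec)
{
	int sec_num = sections_for_blocks(range_blocks, blks_per_sec);

	if (has_not_enough_free_secs(sbi, 0, sec_num))
		return -EAGAIN;
	return 0;
}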

--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -439,7 +439,7 @@ next_step:
 		struct node_info ni;
 
 		/* stop BG_GC if there is not enough free sections. */
-		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
 			return;
 
 		if (check_valid_map(sbi, segno, off) == 0)
@@ -715,7 +715,7 @@ next_step:
 		nid_t nid = le32_to_cpu(entry->nid);
 
 		/* stop BG_GC if there is not enough free sections. */
-		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
 			return;
 
 		if (check_valid_map(sbi, segno, off) == 0)
@@ -916,7 +916,7 @@ gc_more:
 		goto stop;
 	}
 
-	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
+	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
 		gc_type = FG_GC;
 		/*
 		 * If there is no victim and no prefree segment but still not
@@ -927,7 +927,7 @@ gc_more:
 						prefree_segments(sbi)) {
 			write_checkpoint(sbi, &cpc);
 			segno = NULL_SEGNO;
-		} else if (has_not_enough_free_secs(sbi, 0)) {
+		} else if (has_not_enough_free_secs(sbi, 0, 0)) {
 			write_checkpoint(sbi, &cpc);
 		}
 	}
@@ -944,7 +944,7 @@ gc_more:
 		sbi->cur_victim_sec = NULL_SEGNO;
 
 	if (!sync) {
-		if (has_not_enough_free_secs(sbi, sec_freed))
+		if (has_not_enough_free_secs(sbi, sec_freed, 0))
 			goto gc_more;
 
 		if (gc_type == FG_GC)
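
The gc.c changes are mechanical, but the comments in the hunks describe the policy: a background GC pass simply stops when free sections run short, and f2fs_gc() reacts to the same condition by escalating a background request to foreground GC. A loose sketch of that intent on top of the toy predicate above; the enum mirrors the kernel's gc_type values, while the helper names are invented for illustration.

enum gc_kind { BG_GC, FG_GC };	/* mirrors the kernel's gc_type values */

static bool should_stop_bg_gc(const struct fake_sbi *sbi, int gc_type)
{
	/* background GC backs off instead of competing for scarce sections */
	return gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0);
}

static int escalate_if_tight(const struct fake_sbi *sbi, int gc_type,
			     int sec_freed)
{
	/* a background pass turns into foreground reclaim when space is tight */
	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0))
		gc_type = FG_GC;
	return gc_type;
}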

--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -356,7 +356,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	 * We should do GC or end up with checkpoint, if there are so many dirty
 	 * dir/node pages without enough free segments.
 	 */
-	if (has_not_enough_free_secs(sbi, 0)) {
+	if (has_not_enough_free_secs(sbi, 0, 0)) {
 		mutex_lock(&sbi->gc_mutex);
 		f2fs_gc(sbi, false);
 	}
@@ -1278,7 +1278,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
 
-	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
 		return v_ops->get_victim(sbi,
 				&(curseg)->next_segno, BG_GC, type, SSR);
@@ -1477,7 +1477,7 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 
 	/* direct_io'ed data is aligned to the segment for better performance */
 	if (direct_io && curseg->next_blkoff &&
-		!has_not_enough_free_secs(sbi, 0))
+		!has_not_enough_free_secs(sbi, 0, 0))
 		__allocate_new_segments(sbi, type);
 
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -479,7 +479,8 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
 						reserved_sections(sbi) + 1);
 }
 
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+					int freed, int needed)
 {
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
@@ -489,8 +490,8 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return false;
-	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
-						reserved_sections(sbi));
+	return (free_sections(sbi) + freed) <=
+		(node_secs + 2 * dent_secs + reserved_sections(sbi) + needed);
 }
 
 static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
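
The new predicate reads: space is short when free_sections(sbi) + freed <= node_secs + 2 * dent_secs + reserved_sections(sbi) + needed. Plugging made-up numbers into the toy model from the top shows why passing sec_num in the freed slot was wrong for defragmentation.

#include <assert.h>

/*
 * Made-up numbers: 20 free sections, a threshold of
 * node_secs + 2 * dent_secs + reserved_sections = 14, and a defrag range
 * needing 8 sections.
 *
 * Old call, has_not_enough_free_secs(sbi, sec_num):
 *	(20 + 8) <= 14   -> false, so defrag proceeds even though only
 *	                    20 - 8 = 12 sections would remain, below 14.
 * New call, has_not_enough_free_secs(sbi, 0, sec_num):
 *	20 <= 14 + 8     -> true, so defrag returns -EAGAIN as intended.
 */
static void check_defrag_example(void)
{
	struct fake_sbi sbi = {
		.free_secs = 20, .node_secs = 2,
		.dent_secs = 1, .reserved_secs = 10,
	};
	int sec_num = 8;

	/* old semantics, modeled by abusing the "freed" slot */
	assert(!has_not_enough_free_secs(&sbi, sec_num, 0));
	/* new semantics: reserve sec_num sections up front */
	assert(has_not_enough_free_secs(&sbi, 0, sec_num));
}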