f2fs: compress: fix deadloop in f2fs_write_cache_pages()
With the below mount option and testcase, the kernel hangs.
1. mount -t f2fs -o compress_log_size=5 /dev/vdb /mnt/f2fs
2. touch /mnt/f2fs/file
3. chattr +c /mnt/f2fs/file
4. dd if=/dev/zero of=/mnt/f2fs/file bs=1MB count=1
5. sync
6. dd if=/dev/zero of=/mnt/f2fs/file bs=111 count=11 conv=notrunc
7. sync
INFO: task sync:4788 blocked for more than 120 seconds.
Not tainted 6.5.0-rc1+ #322
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:sync state:D stack:0 pid:4788 ppid:509 flags:0x00000002
Call Trace:
<TASK>
__schedule+0x335/0xf80
schedule+0x6f/0xf0
wb_wait_for_completion+0x5e/0x90
sync_inodes_sb+0xd8/0x2a0
sync_inodes_one_sb+0x1d/0x30
iterate_supers+0x99/0xf0
ksys_sync+0x46/0xb0
__do_sys_sync+0x12/0x20
do_syscall_64+0x3f/0x90
entry_SYSCALL_64_after_hwframe+0x6e/0xd8
The reason is that f2fs_all_cluster_page_ready() assumes the pages array
covers at least one cluster; otherwise it always returns false, resulting
in an infinite loop.
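To make the failure mode concrete, below is a minimal standalone sketch
(hypothetical names, not the actual f2fs code) of the retry pattern: with
compress_log_size=5 a cluster spans 32 pages, so a 16-entry on-stack batch
can never satisfy the "whole cluster collected" check and the loop never
makes progress.

/*
 * Standalone illustration only: ONSTACK_PAGES and cluster_ready() stand
 * in for F2FS_ONSTACK_PAGES and f2fs_all_cluster_page_ready().
 */
#include <stdbool.h>
#include <stdio.h>

#define ONSTACK_PAGES	16	/* stands in for F2FS_ONSTACK_PAGES */

/* the collected pages must cover at least one whole cluster */
static bool cluster_ready(int nr_pages, int cluster_size)
{
	return nr_pages >= cluster_size;
}

int main(void)
{
	int cluster_size = 1 << 5;	/* compress_log_size=5 -> 32 pages */
	int retries = 0;

	for (;;) {
		/* the batch can never hold more than the array size */
		int nr_pages = ONSTACK_PAGES;

		if (cluster_ready(nr_pages, cluster_size))
			break;		/* unreachable: 16 < 32 */

		if (++retries > 1000) {	/* bail out so the demo terminates */
			printf("stuck: a %d-entry array cannot cover a %d-page cluster\n",
			       ONSTACK_PAGES, cluster_size);
			return 1;
		}
	}
	return 0;
}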
By default, the pages array size is 16, which covers the case where
cluster_size is less than or equal to 16. For the case where cluster_size
is larger than 16 (e.g. compress_log_size=5 in the reproducer gives a
cluster_size of 1 << 5 = 32 pages), allocate the pages array dynamically.
Fixes: 4c8ff7095b ("f2fs: support data compression")
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit c5d3f9b764
parent 3669558bdf
@@ -3023,7 +3023,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 {
 	int ret = 0;
 	int done = 0, retry = 0;
-	struct page *pages[F2FS_ONSTACK_PAGES];
+	struct page *pages_local[F2FS_ONSTACK_PAGES];
+	struct page **pages = pages_local;
 	struct folio_batch fbatch;
 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
 	struct bio *bio = NULL;
@@ -3047,6 +3048,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 #endif
 	int nr_folios, p, idx;
 	int nr_pages;
+	unsigned int max_pages = F2FS_ONSTACK_PAGES;
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
 	pgoff_t done_index;
@@ -3056,6 +3058,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 	int submitted = 0;
 	int i;
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (f2fs_compressed_file(inode) &&
+			1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
+		pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+				cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
+		max_pages = 1 << cc.log_cluster_size;
+	}
+#endif
+
 	folio_batch_init(&fbatch);
 
 	if (get_dirty_pages(mapping->host) <=
@@ -3101,7 +3112,7 @@ again:
 add_more:
 		pages[nr_pages] = folio_page(folio, idx);
 		folio_get(folio);
-		if (++nr_pages == F2FS_ONSTACK_PAGES) {
+		if (++nr_pages == max_pages) {
 			index = folio->index + idx + 1;
 			folio_batch_release(&fbatch);
 			goto write;
@@ -3283,6 +3294,11 @@ next:
 	if (bio)
 		f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (pages != pages_local)
+		kfree(pages);
+#endif
+
 	return ret;
 }