f2fs: compress: fix to cover normal cluster write with cp_rwsem
When we overwrite a compressed cluster w/ a normal cluster, we should
not unlock cp_rwsem during f2fs_write_raw_pages(); otherwise data
will be corrupted if partial blocks were persisted before checkpoint
(CP) and sudden power-off recovery (SPOR), because the cluster
metadata was not updated atomically.
Fixes: 4c8ff7095b ("f2fs: support data compression")
Reviewed-by: Daeho Jeong <daehojeong@google.com>
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit fd244524c2
parent 8a430dd49e
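For context: f2fs_lock_op() and f2fs_unlock_op() take and release cp_rwsem in read mode, so an operation bracketed by them cannot be interleaved with a checkpoint. A rough sketch of the helpers (they live in fs/f2fs/f2fs.h; shown here without f2fs's rwsem instrumentation wrappers, so treat it as an approximation rather than the exact source):

/* Approximate sketch of the cp_rwsem helpers, not the exact kernel source. */
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);	/* shared: blocks checkpoint, the exclusive writer */
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

With the fix below, f2fs_write_raw_pages() takes this lock once before writing any block of a cluster that overwrites a compressed cluster, and releases it only after the whole cluster has been submitted (or on the error path), so a checkpoint can never persist a cluster whose block metadata is only half updated.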
fs/f2fs/compress.c
@@ -1443,12 +1443,14 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
 }
 
 static int f2fs_write_raw_pages(struct compress_ctx *cc,
-					int *submitted,
+					int *submitted_p,
 					struct writeback_control *wbc,
 					enum iostat_type io_type)
 {
 	struct address_space *mapping = cc->inode->i_mapping;
-	int _submitted, compr_blocks, ret, i;
+	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+	int submitted, compr_blocks, i;
+	int ret = 0;
 
 	compr_blocks = f2fs_compressed_blocks(cc);
 
@@ -1463,6 +1465,10 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
 	if (compr_blocks < 0)
 		return compr_blocks;
 
+	/* overwrite compressed cluster w/ normal cluster */
+	if (compr_blocks > 0)
+		f2fs_lock_op(sbi);
+
 	for (i = 0; i < cc->cluster_size; i++) {
 		if (!cc->rpages[i])
 			continue;
@@ -1487,7 +1493,7 @@ continue_unlock:
 		if (!clear_page_dirty_for_io(cc->rpages[i]))
 			goto continue_unlock;
 
-		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+		ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
 						NULL, NULL, wbc, io_type,
 						compr_blocks, false);
 		if (ret) {
@@ -1495,26 +1501,29 @@ continue_unlock:
 				unlock_page(cc->rpages[i]);
 				ret = 0;
 			} else if (ret == -EAGAIN) {
+				ret = 0;
 				/*
 				 * for quota file, just redirty left pages to
 				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
 				 */
 				if (IS_NOQUOTA(cc->inode))
-					return 0;
-				ret = 0;
+					goto out;
 				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
 				goto retry_write;
 			}
-			return ret;
+			goto out;
 		}
 
-		*submitted += _submitted;
+		*submitted_p += submitted;
 	}
 
-	f2fs_balance_fs(F2FS_M_SB(mapping), true);
+out:
+	if (compr_blocks > 0)
+		f2fs_unlock_op(sbi);
 
-	return 0;
+	f2fs_balance_fs(sbi, true);
+	return ret;
 }
 
 int f2fs_write_multi_pages(struct compress_ctx *cc,
fs/f2fs/data.c
@@ -2839,7 +2839,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 		.encrypted_page = NULL,
 		.submitted = 0,
 		.compr_blocks = compr_blocks,
-		.need_lock = LOCK_RETRY,
+		.need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
 		.post_read = f2fs_post_read_required(inode) ? 1 : 0,
 		.io_type = io_type,
 		.io_wbc = wbc,
@@ -2920,6 +2920,7 @@ write:
 	if (err == -EAGAIN) {
 		err = f2fs_do_write_data_page(&fio);
 		if (err == -EAGAIN) {
+			f2fs_bug_on(sbi, compr_blocks);
 			fio.need_lock = LOCK_REQ;
 			err = f2fs_do_write_data_page(&fio);
 		}
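The data.c side of the fix keys off the same lock: when compr_blocks is non-zero, the caller (f2fs_write_raw_pages()) already holds cp_rwsem, so fio.need_lock is set to LOCK_DONE, and the -EAGAIN fallback that escalates to LOCK_REQ must never be reached for such writes, which the new f2fs_bug_on() asserts. For reference, the need_lock values are declared in fs/f2fs/f2fs.h roughly as below; the explanatory comments are added here and are not part of the kernel source:

enum need_lock_type {
	LOCK_REQ = 0,	/* f2fs_do_write_data_page() must take cp_rwsem itself */
	LOCK_DONE,	/* caller already holds cp_rwsem, e.g. a compressed-cluster overwrite */
	LOCK_RETRY,	/* trylock; on contention return -EAGAIN so the caller retries with LOCK_REQ */
};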