mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-27 03:55:37 +08:00
f2fs update for 6.9-rc1
In this round, there are a number of updates on mainly two areas: Zoned block device support and Per-file compression. For example, we've found several issues to support Zoned block device especially having large sections regarding to GC and file pinning used for Android devices. In compression side, we've fixed many corner race conditions that had broken the design assumption. Enhancement: - Support file pinning for Zoned block device having large section - Enhance the data recovery after sudden power cut on Zoned block device - Add more error injection cases to easily detect the kernel panics - add a proc entry show the entire disk layout - Improve various error paths paniced by BUG_ON in block allocation and GC - support SEEK_DATA and SEEK_HOLE for compression files Bug fix: - fix to avoid use-after-free issue in f2fs_filemap_fault - fix some race conditions to break the atomic write design assumption - fix to truncate meta inode pages forcely - resolve various per-file compression issues wrt the space management and compression policies - fix some swap-related bugs In addition, we removed deprecated codes such as io_bits and heap_allocation, and also fixed minor error handling routines with neat debugging messages. -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEE00UqedjCtOrGVvQiQBSofoJIUNIFAmX4gS0ACgkQQBSofoJI UNLmgBAAg4mvbWjmJ5VbXs4zGLOgLRJYcY1sZRO5Ufg4LhWzoGRxL1Dru+TELw0t 1Ck2EQvP91XZ5weA5AZOfWbxcijy4+8L3P8L7ohOShudfACci0wQsx6IaUUWWylC ILA4+DkovpZrlu6th12Gj9QAM6TN9gdy3V1VLT5O/KmE1x6Pekwp2hQoIvVJRH5L I3KxOf5fTe3oWLvEN6m7yCz/8qGqz8+w0ae90UG0fqi0wVEuZJ99zsVPnuhu6uBo riFm2A6ra0I/JqoPyqn2QM6ApItM867ULo9EoyQVgq56Q1w31ENOJXsU9N7N4Wxt olgujH1SijkWk9ni57iKtMhR68e3Rs+pVsuNFmJuOPq0HASoggB66QRrVvCgM9JG z3D//CB2ONtX2XiKJMiTcX9VqIqrMw6L1eVxEZu0P96C3CS70MoBU69mdSR9Og2S 5nQXja3yzFhdk3thp6+wAJ3I04ZQkf3qoHZB+0chU2Xl1pV+5NIkBgBsSw8g/TY3 EIHMfK+TX0SBSNCvkUDEJ+Z8ZRID6tcbAquTSsBr6wxB+F9mq7onEvI8O7xwyH9W DU8xhymOE2QUoluNtyW7ww6HK913ripXIenI9LaYJnuj0XeDAcMIoPsgR7AGU5UG hshvirFdUdWRMTfXxNNUrvhOWI0qurQSVx+VV6Qb62DGqR5ofOw= =Qpvy -----END PGP SIGNATURE----- Merge tag 'f2fs-for-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs Pull f2fs update from Jaegeuk Kim: "In this round, there are a number of updates on mainly two areas: Zoned block device support and Per-file compression. For example, we've found several issues to support Zoned block device especially having large sections regarding to GC and file pinning used for Android devices. In compression side, we've fixed many corner race conditions that had broken the design assumption. 
Enhancements: - Support file pinning for Zoned block device having large section - Enhance the data recovery after sudden power cut on Zoned block device - Add more error injection cases to easily detect the kernel panics - add a proc entry show the entire disk layout - Improve various error paths paniced by BUG_ON in block allocation and GC - support SEEK_DATA and SEEK_HOLE for compression files Bug fixes: - avoid use-after-free issue in f2fs_filemap_fault - fix some race conditions to break the atomic write design assumption - fix to truncate meta inode pages forcely - resolve various per-file compression issues wrt the space management and compression policies - fix some swap-related bugs In addition, we removed deprecated codes such as io_bits and heap_allocation, and also fixed minor error handling routines with neat debugging messages" * tag 'f2fs-for-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (60 commits) f2fs: fix to avoid use-after-free issue in f2fs_filemap_fault f2fs: truncate page cache before clearing flags when aborting atomic write f2fs: mark inode dirty for FI_ATOMIC_COMMITTED flag f2fs: prevent atomic write on pinned file f2fs: fix to handle error paths of {new,change}_curseg() f2fs: unify the error handling of f2fs_is_valid_blkaddr f2fs: zone: fix to remove pow2 check condition for zoned block device f2fs: fix to truncate meta inode pages forcely f2fs: compress: fix reserve_cblocks counting error when out of space f2fs: compress: relocate some judgments in f2fs_reserve_compress_blocks f2fs: add a proc entry show disk layout f2fs: introduce SEGS_TO_BLKS/BLKS_TO_SEGS for cleanup f2fs: fix to check return value of f2fs_gc_range f2fs: fix to check return value __allocate_new_segment f2fs: fix to do sanity check in update_sit_entry f2fs: fix to reset fields for unloaded curseg f2fs: clean up new_curseg() f2fs: relocate f2fs_precache_extents() in f2fs_swap_activate() f2fs: fix blkofs_end correctly in f2fs_migrate_blocks() f2fs: ro: don't start discard thread for readonly image ...
This commit is contained in:
commit
c5d9ab85eb
@ -205,7 +205,7 @@ Description: Controls the idle timing of system, if there is no FS operation
|
||||
What: /sys/fs/f2fs/<disk>/discard_idle_interval
|
||||
Date: September 2018
|
||||
Contact: "Chao Yu" <yuchao0@huawei.com>
|
||||
Contact: "Sahitya Tummala" <stummala@codeaurora.org>
|
||||
Contact: "Sahitya Tummala" <quic_stummala@quicinc.com>
|
||||
Description: Controls the idle timing of discard thread given
|
||||
this time interval.
|
||||
Default is 5 secs.
|
||||
@ -213,7 +213,7 @@ Description: Controls the idle timing of discard thread given
|
||||
What: /sys/fs/f2fs/<disk>/gc_idle_interval
|
||||
Date: September 2018
|
||||
Contact: "Chao Yu" <yuchao0@huawei.com>
|
||||
Contact: "Sahitya Tummala" <stummala@codeaurora.org>
|
||||
Contact: "Sahitya Tummala" <quic_stummala@quicinc.com>
|
||||
Description: Controls the idle timing for gc path. Set to 5 seconds by default.
|
||||
|
||||
What: /sys/fs/f2fs/<disk>/iostat_enable
|
||||
@ -701,29 +701,31 @@ Description: Support configuring fault injection type, should be
|
||||
enabled with fault_injection option, fault type value
|
||||
is shown below, it supports single or combined type.
|
||||
|
||||
=================== ===========
|
||||
Type_Name Type_Value
|
||||
=================== ===========
|
||||
FAULT_KMALLOC 0x000000001
|
||||
FAULT_KVMALLOC 0x000000002
|
||||
FAULT_PAGE_ALLOC 0x000000004
|
||||
FAULT_PAGE_GET 0x000000008
|
||||
FAULT_ALLOC_BIO 0x000000010 (obsolete)
|
||||
FAULT_ALLOC_NID 0x000000020
|
||||
FAULT_ORPHAN 0x000000040
|
||||
FAULT_BLOCK 0x000000080
|
||||
FAULT_DIR_DEPTH 0x000000100
|
||||
FAULT_EVICT_INODE 0x000000200
|
||||
FAULT_TRUNCATE 0x000000400
|
||||
FAULT_READ_IO 0x000000800
|
||||
FAULT_CHECKPOINT 0x000001000
|
||||
FAULT_DISCARD 0x000002000
|
||||
FAULT_WRITE_IO 0x000004000
|
||||
FAULT_SLAB_ALLOC 0x000008000
|
||||
FAULT_DQUOT_INIT 0x000010000
|
||||
FAULT_LOCK_OP 0x000020000
|
||||
FAULT_BLKADDR 0x000040000
|
||||
=================== ===========
|
||||
=========================== ===========
|
||||
Type_Name Type_Value
|
||||
=========================== ===========
|
||||
FAULT_KMALLOC 0x000000001
|
||||
FAULT_KVMALLOC 0x000000002
|
||||
FAULT_PAGE_ALLOC 0x000000004
|
||||
FAULT_PAGE_GET 0x000000008
|
||||
FAULT_ALLOC_BIO 0x000000010 (obsolete)
|
||||
FAULT_ALLOC_NID 0x000000020
|
||||
FAULT_ORPHAN 0x000000040
|
||||
FAULT_BLOCK 0x000000080
|
||||
FAULT_DIR_DEPTH 0x000000100
|
||||
FAULT_EVICT_INODE 0x000000200
|
||||
FAULT_TRUNCATE 0x000000400
|
||||
FAULT_READ_IO 0x000000800
|
||||
FAULT_CHECKPOINT 0x000001000
|
||||
FAULT_DISCARD 0x000002000
|
||||
FAULT_WRITE_IO 0x000004000
|
||||
FAULT_SLAB_ALLOC 0x000008000
|
||||
FAULT_DQUOT_INIT 0x000010000
|
||||
FAULT_LOCK_OP 0x000020000
|
||||
FAULT_BLKADDR_VALIDITY 0x000040000
|
||||
FAULT_BLKADDR_CONSISTENCE 0x000080000
|
||||
FAULT_NO_SEGMENT 0x000100000
|
||||
=========================== ===========
|
||||
|
||||
What: /sys/fs/f2fs/<disk>/discard_io_aware_gran
|
||||
Date: January 2023
|
||||
|
@ -126,9 +126,7 @@ norecovery Disable the roll-forward recovery routine, mounted read-
|
||||
discard/nodiscard Enable/disable real-time discard in f2fs, if discard is
|
||||
enabled, f2fs will issue discard/TRIM commands when a
|
||||
segment is cleaned.
|
||||
no_heap Disable heap-style segment allocation which finds free
|
||||
segments for data from the beginning of main area, while
|
||||
for node from the end of main area.
|
||||
heap/no_heap Deprecated.
|
||||
nouser_xattr Disable Extended User Attributes. Note: xattr is enabled
|
||||
by default if CONFIG_F2FS_FS_XATTR is selected.
|
||||
noacl Disable POSIX Access Control List. Note: acl is enabled
|
||||
@ -184,29 +182,31 @@ fault_type=%d Support configuring fault injection type, should be
|
||||
enabled with fault_injection option, fault type value
|
||||
is shown below, it supports single or combined type.
|
||||
|
||||
=================== ===========
|
||||
Type_Name Type_Value
|
||||
=================== ===========
|
||||
FAULT_KMALLOC 0x000000001
|
||||
FAULT_KVMALLOC 0x000000002
|
||||
FAULT_PAGE_ALLOC 0x000000004
|
||||
FAULT_PAGE_GET 0x000000008
|
||||
FAULT_ALLOC_BIO 0x000000010 (obsolete)
|
||||
FAULT_ALLOC_NID 0x000000020
|
||||
FAULT_ORPHAN 0x000000040
|
||||
FAULT_BLOCK 0x000000080
|
||||
FAULT_DIR_DEPTH 0x000000100
|
||||
FAULT_EVICT_INODE 0x000000200
|
||||
FAULT_TRUNCATE 0x000000400
|
||||
FAULT_READ_IO 0x000000800
|
||||
FAULT_CHECKPOINT 0x000001000
|
||||
FAULT_DISCARD 0x000002000
|
||||
FAULT_WRITE_IO 0x000004000
|
||||
FAULT_SLAB_ALLOC 0x000008000
|
||||
FAULT_DQUOT_INIT 0x000010000
|
||||
FAULT_LOCK_OP 0x000020000
|
||||
FAULT_BLKADDR 0x000040000
|
||||
=================== ===========
|
||||
=========================== ===========
|
||||
Type_Name Type_Value
|
||||
=========================== ===========
|
||||
FAULT_KMALLOC 0x000000001
|
||||
FAULT_KVMALLOC 0x000000002
|
||||
FAULT_PAGE_ALLOC 0x000000004
|
||||
FAULT_PAGE_GET 0x000000008
|
||||
FAULT_ALLOC_BIO 0x000000010 (obsolete)
|
||||
FAULT_ALLOC_NID 0x000000020
|
||||
FAULT_ORPHAN 0x000000040
|
||||
FAULT_BLOCK 0x000000080
|
||||
FAULT_DIR_DEPTH 0x000000100
|
||||
FAULT_EVICT_INODE 0x000000200
|
||||
FAULT_TRUNCATE 0x000000400
|
||||
FAULT_READ_IO 0x000000800
|
||||
FAULT_CHECKPOINT 0x000001000
|
||||
FAULT_DISCARD 0x000002000
|
||||
FAULT_WRITE_IO 0x000004000
|
||||
FAULT_SLAB_ALLOC 0x000008000
|
||||
FAULT_DQUOT_INIT 0x000010000
|
||||
FAULT_LOCK_OP 0x000020000
|
||||
FAULT_BLKADDR_VALIDITY 0x000040000
|
||||
FAULT_BLKADDR_CONSISTENCE 0x000080000
|
||||
FAULT_NO_SEGMENT 0x000100000
|
||||
=========================== ===========
|
||||
mode=%s Control block allocation mode which supports "adaptive"
|
||||
and "lfs". In "lfs" mode, there should be no random
|
||||
writes towards main area.
|
||||
@ -228,8 +228,6 @@ mode=%s Control block allocation mode which supports "adaptive"
|
||||
option for more randomness.
|
||||
Please, use these options for your experiments and we strongly
|
||||
recommend to re-format the filesystem after using these options.
|
||||
io_bits=%u Set the bit size of write IO requests. It should be set
|
||||
with "mode=lfs".
|
||||
usrquota Enable plain user disk quota accounting.
|
||||
grpquota Enable plain group disk quota accounting.
|
||||
prjquota Enable plain project quota accounting.
|
||||
|
@ -154,49 +154,47 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
|
||||
if (unlikely(f2fs_cp_error(sbi)))
|
||||
return exist;
|
||||
|
||||
if (exist && type == DATA_GENERIC_ENHANCE_UPDATE) {
|
||||
f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
|
||||
blkaddr, exist);
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
return exist;
|
||||
}
|
||||
if ((exist && type == DATA_GENERIC_ENHANCE_UPDATE) ||
|
||||
(!exist && type == DATA_GENERIC_ENHANCE))
|
||||
goto out_err;
|
||||
if (!exist && type != DATA_GENERIC_ENHANCE_UPDATE)
|
||||
goto out_handle;
|
||||
return exist;
|
||||
|
||||
if (!exist && type == DATA_GENERIC_ENHANCE) {
|
||||
f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
|
||||
blkaddr, exist);
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
dump_stack();
|
||||
}
|
||||
out_err:
|
||||
f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
|
||||
blkaddr, exist);
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
dump_stack();
|
||||
out_handle:
|
||||
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
|
||||
return exist;
|
||||
}
|
||||
|
||||
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
|
||||
static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
|
||||
block_t blkaddr, int type)
|
||||
{
|
||||
if (time_to_inject(sbi, FAULT_BLKADDR))
|
||||
return false;
|
||||
|
||||
switch (type) {
|
||||
case META_NAT:
|
||||
break;
|
||||
case META_SIT:
|
||||
if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
|
||||
return false;
|
||||
goto err;
|
||||
break;
|
||||
case META_SSA:
|
||||
if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
|
||||
blkaddr < SM_I(sbi)->ssa_blkaddr))
|
||||
return false;
|
||||
goto err;
|
||||
break;
|
||||
case META_CP:
|
||||
if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
|
||||
blkaddr < __start_cp_addr(sbi)))
|
||||
return false;
|
||||
goto err;
|
||||
break;
|
||||
case META_POR:
|
||||
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
|
||||
blkaddr < MAIN_BLKADDR(sbi)))
|
||||
return false;
|
||||
goto err;
|
||||
break;
|
||||
case DATA_GENERIC:
|
||||
case DATA_GENERIC_ENHANCE:
|
||||
@ -213,7 +211,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
|
||||
blkaddr);
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
dump_stack();
|
||||
return false;
|
||||
goto err;
|
||||
} else {
|
||||
return __is_bitmap_valid(sbi, blkaddr, type);
|
||||
}
|
||||
@ -221,13 +219,30 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
|
||||
case META_GENERIC:
|
||||
if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
|
||||
blkaddr >= MAIN_BLKADDR(sbi)))
|
||||
return false;
|
||||
goto err;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
return true;
|
||||
err:
|
||||
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
|
||||
block_t blkaddr, int type)
|
||||
{
|
||||
if (time_to_inject(sbi, FAULT_BLKADDR_VALIDITY))
|
||||
return false;
|
||||
return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
|
||||
}
|
||||
|
||||
bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
|
||||
block_t blkaddr, int type)
|
||||
{
|
||||
return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -889,7 +904,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
|
||||
|
||||
cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count);
|
||||
|
||||
if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) {
|
||||
if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) {
|
||||
f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
|
||||
le32_to_cpu(cp_block->cp_pack_total_block_count));
|
||||
goto invalid_cp;
|
||||
@ -1324,7 +1339,7 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
|
||||
|
||||
if (cpc->reason & CP_UMOUNT) {
|
||||
if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
|
||||
NM_I(sbi)->nat_bits_blocks > sbi->blocks_per_seg) {
|
||||
NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) {
|
||||
clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
|
||||
f2fs_notice(sbi, "Disable nat_bits due to no space");
|
||||
} else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
|
||||
@ -1527,7 +1542,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
|
||||
cp_ver |= ((__u64)crc32 << 32);
|
||||
*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
|
||||
|
||||
blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
|
||||
blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
|
||||
for (i = 0; i < nm_i->nat_bits_blocks; i++)
|
||||
f2fs_update_meta_page(sbi, nm_i->nat_bits +
|
||||
(i << F2FS_BLKSIZE_BITS), blk + i);
|
||||
@ -1587,8 +1602,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
|
||||
*/
|
||||
if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
|
||||
f2fs_sb_has_compression(sbi))
|
||||
invalidate_mapping_pages(META_MAPPING(sbi),
|
||||
MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
|
||||
f2fs_bug_on(sbi,
|
||||
invalidate_inode_pages2_range(META_MAPPING(sbi),
|
||||
MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1));
|
||||
|
||||
f2fs_release_ino_entry(sbi, false);
|
||||
|
||||
@ -1730,9 +1746,9 @@ void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
|
||||
im->ino_num = 0;
|
||||
}
|
||||
|
||||
sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
|
||||
sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS -
|
||||
NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
|
||||
F2FS_ORPHANS_PER_BLOCK;
|
||||
F2FS_ORPHANS_PER_BLOCK;
|
||||
}
|
||||
|
||||
int __init f2fs_create_checkpoint_caches(void)
|
||||
|
@ -512,8 +512,8 @@ static int lzorle_compress_pages(struct compress_ctx *cc)
|
||||
ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
|
||||
&cc->clen, cc->private);
|
||||
if (ret != LZO_E_OK) {
|
||||
printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
|
||||
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
|
||||
f2fs_err_ratelimited(F2FS_I_SB(cc->inode),
|
||||
"lzo-rle compress failed, ret:%d", ret);
|
||||
return -EIO;
|
||||
}
|
||||
return 0;
|
||||
@ -780,9 +780,9 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
|
||||
if (provided != calculated) {
|
||||
if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
|
||||
set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
|
||||
printk_ratelimited(
|
||||
"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
|
||||
KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
|
||||
f2fs_info_ratelimited(sbi,
|
||||
"checksum invalid, nid = %lu, %x vs %x",
|
||||
dic->inode->i_ino,
|
||||
provided, calculated);
|
||||
}
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
@ -1418,6 +1418,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
|
||||
struct f2fs_sb_info *sbi = bio->bi_private;
|
||||
struct compress_io_ctx *cic =
|
||||
(struct compress_io_ctx *)page_private(page);
|
||||
enum count_type type = WB_DATA_TYPE(page,
|
||||
f2fs_is_compressed_page(page));
|
||||
int i;
|
||||
|
||||
if (unlikely(bio->bi_status))
|
||||
@ -1425,7 +1427,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
|
||||
|
||||
f2fs_compress_free_page(page);
|
||||
|
||||
dec_page_count(sbi, F2FS_WB_DATA);
|
||||
dec_page_count(sbi, type);
|
||||
|
||||
if (atomic_dec_return(&cic->pending_pages))
|
||||
return;
|
||||
@ -1441,12 +1443,14 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
|
||||
}
|
||||
|
||||
static int f2fs_write_raw_pages(struct compress_ctx *cc,
|
||||
int *submitted,
|
||||
int *submitted_p,
|
||||
struct writeback_control *wbc,
|
||||
enum iostat_type io_type)
|
||||
{
|
||||
struct address_space *mapping = cc->inode->i_mapping;
|
||||
int _submitted, compr_blocks, ret, i;
|
||||
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
|
||||
int submitted, compr_blocks, i;
|
||||
int ret = 0;
|
||||
|
||||
compr_blocks = f2fs_compressed_blocks(cc);
|
||||
|
||||
@ -1461,6 +1465,10 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
|
||||
if (compr_blocks < 0)
|
||||
return compr_blocks;
|
||||
|
||||
/* overwrite compressed cluster w/ normal cluster */
|
||||
if (compr_blocks > 0)
|
||||
f2fs_lock_op(sbi);
|
||||
|
||||
for (i = 0; i < cc->cluster_size; i++) {
|
||||
if (!cc->rpages[i])
|
||||
continue;
|
||||
@ -1485,7 +1493,7 @@ continue_unlock:
|
||||
if (!clear_page_dirty_for_io(cc->rpages[i]))
|
||||
goto continue_unlock;
|
||||
|
||||
ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
|
||||
ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
|
||||
NULL, NULL, wbc, io_type,
|
||||
compr_blocks, false);
|
||||
if (ret) {
|
||||
@ -1493,26 +1501,29 @@ continue_unlock:
|
||||
unlock_page(cc->rpages[i]);
|
||||
ret = 0;
|
||||
} else if (ret == -EAGAIN) {
|
||||
ret = 0;
|
||||
/*
|
||||
* for quota file, just redirty left pages to
|
||||
* avoid deadlock caused by cluster update race
|
||||
* from foreground operation.
|
||||
*/
|
||||
if (IS_NOQUOTA(cc->inode))
|
||||
return 0;
|
||||
ret = 0;
|
||||
goto out;
|
||||
f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
|
||||
goto retry_write;
|
||||
}
|
||||
return ret;
|
||||
goto out;
|
||||
}
|
||||
|
||||
*submitted += _submitted;
|
||||
*submitted_p += submitted;
|
||||
}
|
||||
|
||||
f2fs_balance_fs(F2FS_M_SB(mapping), true);
|
||||
out:
|
||||
if (compr_blocks > 0)
|
||||
f2fs_unlock_op(sbi);
|
||||
|
||||
return 0;
|
||||
f2fs_balance_fs(sbi, true);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int f2fs_write_multi_pages(struct compress_ctx *cc,
|
||||
@ -1806,16 +1817,18 @@ void f2fs_put_page_dic(struct page *page, bool in_task)
|
||||
* check whether cluster blocks are contiguous, and add extent cache entry
|
||||
* only if cluster blocks are logically and physically contiguous.
|
||||
*/
|
||||
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
|
||||
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
|
||||
unsigned int ofs_in_node)
|
||||
{
|
||||
bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
|
||||
bool compressed = data_blkaddr(dn->inode, dn->node_page,
|
||||
ofs_in_node) == COMPRESS_ADDR;
|
||||
int i = compressed ? 1 : 0;
|
||||
block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
|
||||
dn->ofs_in_node + i);
|
||||
ofs_in_node + i);
|
||||
|
||||
for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
|
||||
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
|
||||
dn->ofs_in_node + i);
|
||||
ofs_in_node + i);
|
||||
|
||||
if (!__is_valid_data_blkaddr(blkaddr))
|
||||
break;
|
||||
@ -1878,12 +1891,8 @@ void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
|
||||
|
||||
set_page_private_data(cpage, ino);
|
||||
|
||||
if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
|
||||
goto out;
|
||||
|
||||
memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
|
||||
SetPageUptodate(cpage);
|
||||
out:
|
||||
f2fs_put_page(cpage, 1);
|
||||
}
|
||||
|
||||
|
191
fs/f2fs/data.c
191
fs/f2fs/data.c
@ -48,7 +48,7 @@ void f2fs_destroy_bioset(void)
|
||||
bioset_exit(&f2fs_bioset);
|
||||
}
|
||||
|
||||
static bool __is_cp_guaranteed(struct page *page)
|
||||
bool f2fs_is_cp_guaranteed(struct page *page)
|
||||
{
|
||||
struct address_space *mapping = page->mapping;
|
||||
struct inode *inode;
|
||||
@ -65,8 +65,6 @@ static bool __is_cp_guaranteed(struct page *page)
|
||||
S_ISDIR(inode->i_mode))
|
||||
return true;
|
||||
|
||||
if (f2fs_is_compressed_page(page))
|
||||
return false;
|
||||
if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
|
||||
page_private_gcing(page))
|
||||
return true;
|
||||
@ -338,18 +336,7 @@ static void f2fs_write_end_io(struct bio *bio)
|
||||
|
||||
bio_for_each_segment_all(bvec, bio, iter_all) {
|
||||
struct page *page = bvec->bv_page;
|
||||
enum count_type type = WB_DATA_TYPE(page);
|
||||
|
||||
if (page_private_dummy(page)) {
|
||||
clear_page_private_dummy(page);
|
||||
unlock_page(page);
|
||||
mempool_free(page, sbi->write_io_dummy);
|
||||
|
||||
if (unlikely(bio->bi_status))
|
||||
f2fs_stop_checkpoint(sbi, true,
|
||||
STOP_CP_REASON_WRITE_FAIL);
|
||||
continue;
|
||||
}
|
||||
enum count_type type = WB_DATA_TYPE(page, false);
|
||||
|
||||
fscrypt_finalize_bounce_page(&page);
|
||||
|
||||
@ -524,50 +511,13 @@ void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
|
||||
submit_bio(bio);
|
||||
}
|
||||
|
||||
static void f2fs_align_write_bio(struct f2fs_sb_info *sbi, struct bio *bio)
|
||||
{
|
||||
unsigned int start =
|
||||
(bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS) % F2FS_IO_SIZE(sbi);
|
||||
|
||||
if (start == 0)
|
||||
return;
|
||||
|
||||
/* fill dummy pages */
|
||||
for (; start < F2FS_IO_SIZE(sbi); start++) {
|
||||
struct page *page =
|
||||
mempool_alloc(sbi->write_io_dummy,
|
||||
GFP_NOIO | __GFP_NOFAIL);
|
||||
f2fs_bug_on(sbi, !page);
|
||||
|
||||
lock_page(page);
|
||||
|
||||
zero_user_segment(page, 0, PAGE_SIZE);
|
||||
set_page_private_dummy(page);
|
||||
|
||||
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
|
||||
f2fs_bug_on(sbi, 1);
|
||||
}
|
||||
}
|
||||
|
||||
static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
|
||||
enum page_type type)
|
||||
{
|
||||
WARN_ON_ONCE(is_read_io(bio_op(bio)));
|
||||
|
||||
if (type == DATA || type == NODE) {
|
||||
if (f2fs_lfs_mode(sbi) && current->plug)
|
||||
blk_finish_plug(current->plug);
|
||||
|
||||
if (F2FS_IO_ALIGNED(sbi)) {
|
||||
f2fs_align_write_bio(sbi, bio);
|
||||
/*
|
||||
* In the NODE case, we lose next block address chain.
|
||||
* So, we need to do checkpoint in f2fs_sync_file.
|
||||
*/
|
||||
if (type == NODE)
|
||||
set_sbi_flag(sbi, SBI_NEED_CP);
|
||||
}
|
||||
}
|
||||
if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
|
||||
blk_finish_plug(current->plug);
|
||||
|
||||
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
|
||||
iostat_update_submit_ctx(bio, type);
|
||||
@ -740,10 +690,8 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
|
||||
|
||||
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
|
||||
fio->is_por ? META_POR : (__is_meta_io(fio) ?
|
||||
META_GENERIC : DATA_GENERIC_ENHANCE))) {
|
||||
f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
|
||||
META_GENERIC : DATA_GENERIC_ENHANCE)))
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
trace_f2fs_submit_page_bio(page, fio);
|
||||
|
||||
@ -762,7 +710,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
|
||||
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
|
||||
|
||||
inc_page_count(fio->sbi, is_read_io(fio->op) ?
|
||||
__read_io_type(page) : WB_DATA_TYPE(fio->page));
|
||||
__read_io_type(page) : WB_DATA_TYPE(fio->page, false));
|
||||
|
||||
if (is_read_io(bio_op(bio)))
|
||||
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
|
||||
@ -796,16 +744,6 @@ static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
|
||||
block_t last_blkaddr,
|
||||
block_t cur_blkaddr)
|
||||
{
|
||||
if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
|
||||
unsigned int filled_blocks =
|
||||
F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
|
||||
unsigned int io_size = F2FS_IO_SIZE(sbi);
|
||||
unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
|
||||
|
||||
/* IOs in bio is aligned and left space of vectors is not enough */
|
||||
if (!(filled_blocks % io_size) && left_vecs < io_size)
|
||||
return false;
|
||||
}
|
||||
if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
|
||||
return false;
|
||||
return io_type_is_mergeable(io, fio);
|
||||
@ -948,10 +886,8 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
|
||||
fio->encrypted_page : fio->page;
|
||||
|
||||
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
|
||||
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) {
|
||||
f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
|
||||
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
trace_f2fs_submit_page_bio(page, fio);
|
||||
|
||||
@ -973,7 +909,7 @@ alloc_new:
|
||||
if (fio->io_wbc)
|
||||
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
|
||||
|
||||
inc_page_count(fio->sbi, WB_DATA_TYPE(page));
|
||||
inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
|
||||
|
||||
*fio->last_block = fio->new_blkaddr;
|
||||
*fio->bio = bio;
|
||||
@ -1007,11 +943,12 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
|
||||
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
|
||||
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
|
||||
struct page *bio_page;
|
||||
enum count_type type;
|
||||
|
||||
f2fs_bug_on(sbi, is_read_io(fio->op));
|
||||
|
||||
f2fs_down_write(&io->io_rwsem);
|
||||
|
||||
next:
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
|
||||
wait_for_completion_io(&io->zone_wait);
|
||||
@ -1021,7 +958,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
|
||||
}
|
||||
#endif
|
||||
|
||||
next:
|
||||
if (fio->in_list) {
|
||||
spin_lock(&io->io_lock);
|
||||
if (list_empty(&io->io_list)) {
|
||||
@ -1046,7 +982,8 @@ next:
|
||||
/* set submitted = true as a return value */
|
||||
fio->submitted = 1;
|
||||
|
||||
inc_page_count(sbi, WB_DATA_TYPE(bio_page));
|
||||
type = WB_DATA_TYPE(bio_page, fio->compressed_page);
|
||||
inc_page_count(sbi, type);
|
||||
|
||||
if (io->bio &&
|
||||
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
|
||||
@ -1056,13 +993,6 @@ next:
|
||||
__submit_merged_bio(io);
|
||||
alloc_new:
|
||||
if (io->bio == NULL) {
|
||||
if (F2FS_IO_ALIGNED(sbi) &&
|
||||
(fio->type == DATA || fio->type == NODE) &&
|
||||
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
|
||||
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
|
||||
fio->retry = 1;
|
||||
goto skip;
|
||||
}
|
||||
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
|
||||
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
|
||||
bio_page->index, fio, GFP_NOIO);
|
||||
@ -1080,10 +1010,6 @@ alloc_new:
|
||||
io->last_block_in_bio = fio->new_blkaddr;
|
||||
|
||||
trace_f2fs_submit_page_write(fio->page, fio);
|
||||
skip:
|
||||
if (fio->in_list)
|
||||
goto next;
|
||||
out:
|
||||
#ifdef CONFIG_BLK_DEV_ZONED
|
||||
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
|
||||
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
|
||||
@ -1096,6 +1022,9 @@ out:
|
||||
__submit_merged_bio(io);
|
||||
}
|
||||
#endif
|
||||
if (fio->in_list)
|
||||
goto next;
|
||||
out:
|
||||
if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
|
||||
!f2fs_is_checkpoint_ready(sbi))
|
||||
__submit_merged_bio(io);
|
||||
@ -1218,7 +1147,8 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
|
||||
|
||||
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
|
||||
return -EPERM;
|
||||
if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
|
||||
err = inc_valid_block_count(sbi, dn->inode, &count, true);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
|
||||
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
|
||||
@ -1285,8 +1215,6 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
|
||||
if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
|
||||
DATA_GENERIC_ENHANCE_READ)) {
|
||||
err = -EFSCORRUPTED;
|
||||
f2fs_handle_error(F2FS_I_SB(inode),
|
||||
ERROR_INVALID_BLKADDR);
|
||||
goto put_err;
|
||||
}
|
||||
goto got_it;
|
||||
@ -1312,8 +1240,6 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
|
||||
dn.data_blkaddr,
|
||||
DATA_GENERIC_ENHANCE)) {
|
||||
err = -EFSCORRUPTED;
|
||||
f2fs_handle_error(F2FS_I_SB(inode),
|
||||
ERROR_INVALID_BLKADDR);
|
||||
goto put_err;
|
||||
}
|
||||
got_it:
|
||||
@ -1475,15 +1401,18 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
|
||||
|
||||
dn->data_blkaddr = f2fs_data_blkaddr(dn);
|
||||
if (dn->data_blkaddr == NULL_ADDR) {
|
||||
err = inc_valid_block_count(sbi, dn->inode, &count);
|
||||
err = inc_valid_block_count(sbi, dn->inode, &count, true);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
}
|
||||
|
||||
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
|
||||
old_blkaddr = dn->data_blkaddr;
|
||||
f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
|
||||
&sum, seg_type, NULL);
|
||||
err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr,
|
||||
&dn->data_blkaddr, &sum, seg_type, NULL);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
|
||||
f2fs_invalidate_internal_cache(sbi, old_blkaddr);
|
||||
|
||||
@ -1641,7 +1570,6 @@ next_block:
|
||||
if (!is_hole &&
|
||||
!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
|
||||
err = -EFSCORRUPTED;
|
||||
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
|
||||
goto sync_out;
|
||||
}
|
||||
|
||||
@ -2165,8 +2093,6 @@ got_it:
|
||||
if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
|
||||
DATA_GENERIC_ENHANCE_READ)) {
|
||||
ret = -EFSCORRUPTED;
|
||||
f2fs_handle_error(F2FS_I_SB(inode),
|
||||
ERROR_INVALID_BLKADDR);
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
@ -2668,8 +2594,6 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
|
||||
if (fio) {
|
||||
if (page_private_gcing(fio->page))
|
||||
return true;
|
||||
if (page_private_dummy(fio->page))
|
||||
return true;
|
||||
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
|
||||
f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
|
||||
return true;
|
||||
@ -2706,11 +2630,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
|
||||
f2fs_lookup_read_extent_cache_block(inode, page->index,
|
||||
&fio->old_blkaddr)) {
|
||||
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
|
||||
DATA_GENERIC_ENHANCE)) {
|
||||
f2fs_handle_error(fio->sbi,
|
||||
ERROR_INVALID_BLKADDR);
|
||||
DATA_GENERIC_ENHANCE))
|
||||
return -EFSCORRUPTED;
|
||||
}
|
||||
|
||||
ipu_force = true;
|
||||
fio->need_lock = LOCK_DONE;
|
||||
@ -2738,7 +2659,6 @@ got_it:
|
||||
!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
|
||||
DATA_GENERIC_ENHANCE)) {
|
||||
err = -EFSCORRUPTED;
|
||||
f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
|
||||
goto out_writepage;
|
||||
}
|
||||
|
||||
@ -2838,7 +2758,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
|
||||
.encrypted_page = NULL,
|
||||
.submitted = 0,
|
||||
.compr_blocks = compr_blocks,
|
||||
.need_lock = LOCK_RETRY,
|
||||
.need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
|
||||
.post_read = f2fs_post_read_required(inode) ? 1 : 0,
|
||||
.io_type = io_type,
|
||||
.io_wbc = wbc,
|
||||
@ -2919,6 +2839,7 @@ write:
|
||||
if (err == -EAGAIN) {
|
||||
err = f2fs_do_write_data_page(&fio);
|
||||
if (err == -EAGAIN) {
|
||||
f2fs_bug_on(sbi, compr_blocks);
|
||||
fio.need_lock = LOCK_REQ;
|
||||
err = f2fs_do_write_data_page(&fio);
|
||||
}
|
||||
@ -3704,7 +3625,6 @@ repeat:
|
||||
if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
|
||||
DATA_GENERIC_ENHANCE_READ)) {
|
||||
err = -EFSCORRUPTED;
|
||||
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
|
||||
goto fail;
|
||||
}
|
||||
err = f2fs_submit_page_read(use_cow ?
|
||||
@ -3905,26 +3825,36 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
|
||||
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
||||
unsigned int blkofs;
|
||||
unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
|
||||
unsigned int end_blk = start_blk + blkcnt - 1;
|
||||
unsigned int secidx = start_blk / blk_per_sec;
|
||||
unsigned int end_sec = secidx + blkcnt / blk_per_sec;
|
||||
unsigned int end_sec;
|
||||
int ret = 0;
|
||||
|
||||
if (!blkcnt)
|
||||
return 0;
|
||||
end_sec = end_blk / blk_per_sec;
|
||||
|
||||
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
|
||||
filemap_invalidate_lock(inode->i_mapping);
|
||||
|
||||
set_inode_flag(inode, FI_ALIGNED_WRITE);
|
||||
set_inode_flag(inode, FI_OPU_WRITE);
|
||||
|
||||
for (; secidx < end_sec; secidx++) {
|
||||
for (; secidx <= end_sec; secidx++) {
|
||||
unsigned int blkofs_end = secidx == end_sec ?
|
||||
end_blk % blk_per_sec : blk_per_sec - 1;
|
||||
|
||||
f2fs_down_write(&sbi->pin_sem);
|
||||
|
||||
f2fs_lock_op(sbi);
|
||||
f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
|
||||
f2fs_unlock_op(sbi);
|
||||
ret = f2fs_allocate_pinning_section(sbi);
|
||||
if (ret) {
|
||||
f2fs_up_write(&sbi->pin_sem);
|
||||
break;
|
||||
}
|
||||
|
||||
set_inode_flag(inode, FI_SKIP_WRITES);
|
||||
|
||||
for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
|
||||
for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
|
||||
struct page *page;
|
||||
unsigned int blkidx = secidx * blk_per_sec + blkofs;
|
||||
|
||||
@ -4013,27 +3943,34 @@ retry:
|
||||
nr_pblocks = map.m_len;
|
||||
|
||||
if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
|
||||
nr_pblocks & sec_blks_mask) {
|
||||
nr_pblocks & sec_blks_mask ||
|
||||
!f2fs_valid_pinned_area(sbi, pblock)) {
|
||||
bool last_extent = false;
|
||||
|
||||
not_aligned++;
|
||||
|
||||
nr_pblocks = roundup(nr_pblocks, blks_per_sec);
|
||||
if (cur_lblock + nr_pblocks > sis->max)
|
||||
nr_pblocks -= blks_per_sec;
|
||||
|
||||
/* this extent is last one */
|
||||
if (!nr_pblocks) {
|
||||
/* this extent is last one */
|
||||
nr_pblocks = map.m_len;
|
||||
f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
|
||||
goto next;
|
||||
nr_pblocks = last_lblock - cur_lblock;
|
||||
last_extent = true;
|
||||
}
|
||||
|
||||
ret = f2fs_migrate_blocks(inode, cur_lblock,
|
||||
nr_pblocks);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
if (ret == -ENOENT)
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
if (!last_extent)
|
||||
goto retry;
|
||||
}
|
||||
next:
|
||||
|
||||
if (cur_lblock + nr_pblocks >= sis->max)
|
||||
nr_pblocks = sis->max - cur_lblock;
|
||||
|
||||
@ -4071,17 +4008,17 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
|
||||
sector_t *span)
|
||||
{
|
||||
struct inode *inode = file_inode(file);
|
||||
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
||||
int ret;
|
||||
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
return -EINVAL;
|
||||
|
||||
if (f2fs_readonly(F2FS_I_SB(inode)->sb))
|
||||
if (f2fs_readonly(sbi->sb))
|
||||
return -EROFS;
|
||||
|
||||
if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
|
||||
f2fs_err(F2FS_I_SB(inode),
|
||||
"Swapfile not supported in LFS mode");
|
||||
if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
|
||||
f2fs_err(sbi, "Swapfile not supported in LFS mode");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -4092,6 +4029,10 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
|
||||
if (!f2fs_disable_compressed_file(inode))
|
||||
return -EINVAL;
|
||||
|
||||
ret = filemap_fdatawrite(inode->i_mapping);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
f2fs_precache_extents(inode);
|
||||
|
||||
ret = check_swap_activate(sis, file, span);
|
||||
@ -4100,7 +4041,7 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
|
||||
|
||||
stat_inc_swapfile_inode(inode);
|
||||
set_inode_flag(inode, FI_PIN_FILE);
|
||||
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
|
||||
f2fs_update_time(sbi, REQ_TIME);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -41,7 +41,7 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
|
||||
total_vblocks = 0;
|
||||
blks_per_sec = CAP_BLKS_PER_SEC(sbi);
|
||||
hblks_per_sec = blks_per_sec / 2;
|
||||
for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
|
||||
for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
|
||||
vblocks = get_valid_blocks(sbi, segno, true);
|
||||
dist = abs(vblocks - hblks_per_sec);
|
||||
bimodal += dist * dist;
|
||||
@ -135,7 +135,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
|
||||
si->cur_ckpt_time = sbi->cprc_info.cur_time;
|
||||
si->peak_ckpt_time = sbi->cprc_info.peak_time;
|
||||
spin_unlock(&sbi->cprc_info.stat_lock);
|
||||
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
|
||||
si->total_count = BLKS_TO_SEGS(sbi, (int)sbi->user_block_count);
|
||||
si->rsvd_segs = reserved_segments(sbi);
|
||||
si->overp_segs = overprovision_segments(sbi);
|
||||
si->valid_count = valid_user_blocks(sbi);
|
||||
@ -176,11 +176,10 @@ static void update_general_status(struct f2fs_sb_info *sbi)
|
||||
si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
|
||||
si->io_skip_bggc = sbi->io_skip_bggc;
|
||||
si->other_skip_bggc = sbi->other_skip_bggc;
|
||||
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
|
||||
si->util_free = (int)(BLKS_TO_SEGS(sbi, free_user_blocks(sbi)))
|
||||
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
|
||||
/ 2;
|
||||
si->util_valid = (int)(written_block_count(sbi) >>
|
||||
sbi->log_blocks_per_seg)
|
||||
si->util_valid = (int)(BLKS_TO_SEGS(sbi, written_block_count(sbi)))
|
||||
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
|
||||
/ 2;
|
||||
si->util_invalid = 50 - si->util_free - si->util_valid;
|
||||
@ -208,7 +207,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
|
||||
if (!blks)
|
||||
continue;
|
||||
|
||||
if (blks == sbi->blocks_per_seg)
|
||||
if (blks == BLKS_PER_SEG(sbi))
|
||||
si->full_seg[type]++;
|
||||
else
|
||||
si->dirty_seg[type]++;
|
||||
|
@ -830,13 +830,14 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
|
||||
return err;
|
||||
}
|
||||
|
||||
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
|
||||
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
|
||||
struct f2fs_filename *fname)
|
||||
{
|
||||
struct page *page;
|
||||
int err = 0;
|
||||
|
||||
f2fs_down_write(&F2FS_I(inode)->i_sem);
|
||||
page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
|
||||
page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
|
||||
if (IS_ERR(page)) {
|
||||
err = PTR_ERR(page);
|
||||
goto fail;
|
||||
@ -995,9 +996,8 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
|
||||
de = &d->dentry[bit_pos];
|
||||
if (de->name_len == 0) {
|
||||
if (found_valid_dirent || !bit_pos) {
|
||||
printk_ratelimited(
|
||||
"%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
|
||||
KERN_WARNING, sbi->sb->s_id,
|
||||
f2fs_warn_ratelimited(sbi,
|
||||
"invalid namelen(0), ino:%u, run fsck to fix.",
|
||||
le32_to_cpu(de->ino));
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
}
|
||||
|
@ -43,7 +43,6 @@ bool sanity_check_extent_cache(struct inode *inode)
|
||||
if (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE) ||
|
||||
!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
|
||||
DATA_GENERIC_ENHANCE)) {
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
|
||||
__func__, inode->i_ino,
|
||||
ei->blk, ei->fofs, ei->len);
|
||||
@ -856,10 +855,8 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
|
||||
goto out;
|
||||
|
||||
if (__is_valid_data_blkaddr(blkaddr) &&
|
||||
!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
|
||||
f2fs_bug_on(sbi, 1);
|
||||
!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
|
||||
return -EINVAL;
|
||||
}
|
||||
out:
|
||||
/*
|
||||
* init block age with zero, this can happen when the block age extent
|
||||
|
241
fs/f2fs/f2fs.h
241
fs/f2fs/f2fs.h
@ -61,7 +61,9 @@ enum {
|
||||
FAULT_SLAB_ALLOC,
|
||||
FAULT_DQUOT_INIT,
|
||||
FAULT_LOCK_OP,
|
||||
FAULT_BLKADDR,
|
||||
FAULT_BLKADDR_VALIDITY,
|
||||
FAULT_BLKADDR_CONSISTENCE,
|
||||
FAULT_NO_SEGMENT,
|
||||
FAULT_MAX,
|
||||
};
|
||||
|
||||
@ -76,6 +78,11 @@ struct f2fs_fault_info {
|
||||
|
||||
extern const char *f2fs_fault_name[FAULT_MAX];
|
||||
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
|
||||
|
||||
/* maximum retry count for injected failure */
|
||||
#define DEFAULT_FAILURE_RETRY_COUNT 8
|
||||
#else
|
||||
#define DEFAULT_FAILURE_RETRY_COUNT 1
|
||||
#endif
|
||||
|
||||
/*
|
||||
@ -143,7 +150,6 @@ struct f2fs_rwsem {
|
||||
|
||||
struct f2fs_mount_info {
|
||||
unsigned int opt;
|
||||
int write_io_size_bits; /* Write IO size bits */
|
||||
block_t root_reserved_blocks; /* root reserved blocks */
|
||||
kuid_t s_resuid; /* reserved blocks for uid */
|
||||
kgid_t s_resgid; /* reserved blocks for gid */
|
||||
@ -1081,7 +1087,8 @@ struct f2fs_sm_info {
|
||||
* f2fs monitors the number of several block types such as on-writeback,
|
||||
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
|
||||
*/
|
||||
#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
|
||||
#define WB_DATA_TYPE(p, f) \
|
||||
(f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
|
||||
enum count_type {
|
||||
F2FS_DIRTY_DENTS,
|
||||
F2FS_DIRTY_DATA,
|
||||
@ -1111,6 +1118,7 @@ enum count_type {
|
||||
* ... Only can be used with META.
|
||||
*/
|
||||
#define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
|
||||
#define PAGE_TYPE_ON_MAIN(type) ((type) == DATA || (type) == NODE)
|
||||
enum page_type {
|
||||
DATA = 0,
|
||||
NODE = 1, /* should not change this */
|
||||
@ -1205,7 +1213,6 @@ struct f2fs_io_info {
|
||||
unsigned int submitted:1; /* indicate IO submission */
|
||||
unsigned int in_list:1; /* indicate fio is in io_list */
|
||||
unsigned int is_por:1; /* indicate IO is from recovery or not */
|
||||
unsigned int retry:1; /* need to reallocate block address */
|
||||
unsigned int encrypted:1; /* indicate file is encrypted */
|
||||
unsigned int post_read:1; /* require post read */
|
||||
enum iostat_type io_type; /* io type */
|
||||
@ -1407,18 +1414,16 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
|
||||
* Layout A: lowest bit should be 1
|
||||
* | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
|
||||
* bit 0 PAGE_PRIVATE_NOT_POINTER
|
||||
* bit 1 PAGE_PRIVATE_DUMMY_WRITE
|
||||
* bit 2 PAGE_PRIVATE_ONGOING_MIGRATION
|
||||
* bit 3 PAGE_PRIVATE_INLINE_INODE
|
||||
* bit 4 PAGE_PRIVATE_REF_RESOURCE
|
||||
* bit 5- f2fs private data
|
||||
* bit 1 PAGE_PRIVATE_ONGOING_MIGRATION
|
||||
* bit 2 PAGE_PRIVATE_INLINE_INODE
|
||||
* bit 3 PAGE_PRIVATE_REF_RESOURCE
|
||||
* bit 4- f2fs private data
|
||||
*
|
||||
* Layout B: lowest bit should be 0
|
||||
* page.private is a wrapped pointer.
|
||||
*/
|
||||
enum {
|
||||
PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
|
||||
PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
|
||||
PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
|
||||
PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
|
||||
PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
|
||||
@ -1565,7 +1570,6 @@ struct f2fs_sb_info {
|
||||
struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
|
||||
/* keep migration IO order for LFS mode */
|
||||
struct f2fs_rwsem io_order_lock;
|
||||
mempool_t *write_io_dummy; /* Dummy pages */
|
||||
pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
|
||||
int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */
|
||||
|
||||
@ -1811,6 +1815,37 @@ struct f2fs_sb_info {
|
||||
#endif
|
||||
};
|
||||
|
||||
/* Definitions to access f2fs_sb_info */
|
||||
#define SEGS_TO_BLKS(sbi, segs) \
|
||||
((segs) << (sbi)->log_blocks_per_seg)
|
||||
#define BLKS_TO_SEGS(sbi, blks) \
|
||||
((blks) >> (sbi)->log_blocks_per_seg)
|
||||
|
||||
#define BLKS_PER_SEG(sbi) ((sbi)->blocks_per_seg)
|
||||
#define BLKS_PER_SEC(sbi) (SEGS_TO_BLKS(sbi, (sbi)->segs_per_sec))
|
||||
#define SEGS_PER_SEC(sbi) ((sbi)->segs_per_sec)
|
||||
|
||||
__printf(3, 4)
|
||||
void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, const char *fmt, ...);
|
||||
|
||||
#define f2fs_err(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
|
||||
#define f2fs_warn(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, false, KERN_WARNING fmt, ##__VA_ARGS__)
|
||||
#define f2fs_notice(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, false, KERN_NOTICE fmt, ##__VA_ARGS__)
|
||||
#define f2fs_info(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, false, KERN_INFO fmt, ##__VA_ARGS__)
|
||||
#define f2fs_debug(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, false, KERN_DEBUG fmt, ##__VA_ARGS__)
|
||||
|
||||
#define f2fs_err_ratelimited(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)
|
||||
#define f2fs_warn_ratelimited(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, true, KERN_WARNING fmt, ##__VA_ARGS__)
|
||||
#define f2fs_info_ratelimited(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, true, KERN_INFO fmt, ##__VA_ARGS__)
|
||||
|
||||
#ifdef CONFIG_F2FS_FAULT_INJECTION
|
||||
#define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \
|
||||
__builtin_return_address(0))
|
||||
@ -1828,9 +1863,8 @@ static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
|
||||
atomic_inc(&ffi->inject_ops);
|
||||
if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
|
||||
atomic_set(&ffi->inject_ops, 0);
|
||||
printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",
|
||||
KERN_INFO, sbi->sb->s_id, f2fs_fault_name[type],
|
||||
func, parent_func);
|
||||
f2fs_info_ratelimited(sbi, "inject %s in %s of %pS",
|
||||
f2fs_fault_name[type], func, parent_func);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -2250,9 +2284,30 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline unsigned int get_available_block_count(struct f2fs_sb_info *sbi,
|
||||
struct inode *inode, bool cap)
|
||||
{
|
||||
block_t avail_user_block_count;
|
||||
|
||||
avail_user_block_count = sbi->user_block_count -
|
||||
sbi->current_reserved_blocks;
|
||||
|
||||
if (!__allow_reserved_blocks(sbi, inode, cap))
|
||||
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
|
||||
|
||||
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
|
||||
if (avail_user_block_count > sbi->unusable_block_count)
|
||||
avail_user_block_count -= sbi->unusable_block_count;
|
||||
else
|
||||
avail_user_block_count = 0;
|
||||
}
|
||||
|
||||
return avail_user_block_count;
|
||||
}
|
||||
|
||||
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
|
||||
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
|
||||
struct inode *inode, blkcnt_t *count)
|
||||
struct inode *inode, blkcnt_t *count, bool partial)
|
||||
{
|
||||
blkcnt_t diff = 0, release = 0;
|
||||
block_t avail_user_block_count;
|
||||
@ -2275,23 +2330,14 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
|
||||
|
||||
spin_lock(&sbi->stat_lock);
|
||||
sbi->total_valid_block_count += (block_t)(*count);
|
||||
avail_user_block_count = sbi->user_block_count -
|
||||
sbi->current_reserved_blocks;
|
||||
avail_user_block_count = get_available_block_count(sbi, inode, true);
|
||||
|
||||
if (!__allow_reserved_blocks(sbi, inode, true))
|
||||
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
|
||||
|
||||
if (F2FS_IO_ALIGNED(sbi))
|
||||
avail_user_block_count -= sbi->blocks_per_seg *
|
||||
SM_I(sbi)->additional_reserved_segments;
|
||||
|
||||
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
|
||||
if (avail_user_block_count > sbi->unusable_block_count)
|
||||
avail_user_block_count -= sbi->unusable_block_count;
|
||||
else
|
||||
avail_user_block_count = 0;
|
||||
}
|
||||
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
|
||||
if (!partial) {
|
||||
spin_unlock(&sbi->stat_lock);
|
||||
goto enospc;
|
||||
}
|
||||
|
||||
diff = sbi->total_valid_block_count - avail_user_block_count;
|
||||
if (diff > *count)
|
||||
diff = *count;
|
||||
@ -2319,20 +2365,6 @@ release_quota:
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
__printf(2, 3)
|
||||
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
|
||||
|
||||
#define f2fs_err(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
|
||||
#define f2fs_warn(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
|
||||
#define f2fs_notice(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
|
||||
#define f2fs_info(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
|
||||
#define f2fs_debug(sbi, fmt, ...) \
|
||||
f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
|
||||
|
||||
#define PAGE_PRIVATE_GET_FUNC(name, flagname) \
|
||||
static inline bool page_private_##name(struct page *page) \
|
||||
{ \
|
||||
@ -2361,17 +2393,14 @@ static inline void clear_page_private_##name(struct page *page) \
|
||||
PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
|
||||
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
|
||||
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
|
||||
PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
|
||||
|
||||
PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
|
||||
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
|
||||
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
|
||||
PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
|
||||
|
||||
PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
|
||||
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
|
||||
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
|
||||
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
|
||||
|
||||
static inline unsigned long get_page_private_data(struct page *page)
|
||||
{
|
||||
@ -2505,11 +2534,8 @@ static inline int get_dirty_pages(struct inode *inode)
|
||||
|
||||
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
|
||||
{
|
||||
unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
|
||||
unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
|
||||
sbi->log_blocks_per_seg;
|
||||
|
||||
return segs / sbi->segs_per_sec;
|
||||
return div_u64(get_pages(sbi, block_type) + BLKS_PER_SEC(sbi) - 1,
|
||||
BLKS_PER_SEC(sbi));
|
||||
}
|
||||
|
||||
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
|
||||
@ -2573,7 +2599,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
|
||||
    block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

    if (sbi->cur_cp_pack == 2)
-       start_addr += sbi->blocks_per_seg;
+       start_addr += BLKS_PER_SEG(sbi);
    return start_addr;
}

@@ -2582,7 +2608,7 @@ static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
    block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

    if (sbi->cur_cp_pack == 1)
-       start_addr += sbi->blocks_per_seg;
+       start_addr += BLKS_PER_SEG(sbi);
    return start_addr;
}

@@ -2601,7 +2627,8 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
            struct inode *inode, bool is_inode)
{
    block_t valid_block_count;
-   unsigned int valid_node_count, user_block_count;
+   unsigned int valid_node_count;
+   unsigned int avail_user_block_count;
    int err;

    if (is_inode) {

@@ -2621,21 +2648,10 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,

    spin_lock(&sbi->stat_lock);

-   valid_block_count = sbi->total_valid_block_count +
-                   sbi->current_reserved_blocks + 1;
+   valid_block_count = sbi->total_valid_block_count + 1;
+   avail_user_block_count = get_available_block_count(sbi, inode, false);

-   if (!__allow_reserved_blocks(sbi, inode, false))
-       valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
-
-   if (F2FS_IO_ALIGNED(sbi))
-       valid_block_count += sbi->blocks_per_seg *
-               SM_I(sbi)->additional_reserved_segments;
-
-   user_block_count = sbi->user_block_count;
-   if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
-       user_block_count -= sbi->unusable_block_count;
-
-   if (unlikely(valid_block_count > user_block_count)) {
+   if (unlikely(valid_block_count > avail_user_block_count)) {
        spin_unlock(&sbi->stat_lock);
        goto enospc;
    }

@@ -3022,6 +3038,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
    case FI_INLINE_DOTS:
    case FI_PIN_FILE:
    case FI_COMPRESS_RELEASED:
+   case FI_ATOMIC_COMMITTED:
        f2fs_mark_inode_dirty_sync(inode, true);
    }
}

@@ -3445,7 +3462,7 @@ static inline __le32 *get_dnode_addr(struct inode *inode,
        sizeof((f2fs_inode)->field)) \
        <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \

-#define __is_large_section(sbi)    ((sbi)->segs_per_sec > 1)
+#define __is_large_section(sbi)    (SEGS_PER_SEC(sbi) > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

@@ -3454,11 +3471,9 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
                    block_t blkaddr, int type)
{
-   if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
-       f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
-            blkaddr, type);
+   if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
        f2fs_bug_on(sbi, 1);
-   }
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)

@@ -3560,7 +3575,8 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
            struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
            struct inode *dir, struct inode *inode);
-int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
+int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
+           struct f2fs_filename *fname);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)

@@ -3675,15 +3691,14 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
-void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
+int f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
-void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
-           unsigned int *newseg, bool new_sec, int dir);
-void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
+int f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
                    unsigned int start, unsigned int end);
-void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+int f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
+int f2fs_allocate_pinning_section(struct f2fs_sb_info *sbi);
+int f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
                    struct cp_control *cpc);

@@ -3704,7 +3719,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
            block_t old_addr, block_t new_addr,
            unsigned char version, bool recover_curseg,
            bool recover_newaddr);
-void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
            block_t old_blkaddr, block_t *new_blkaddr,
            struct f2fs_summary *sum, int type,
            struct f2fs_io_info *fio);

@@ -3754,6 +3769,8 @@ struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
                    block_t blkaddr, int type);
+bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
+                   block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
            int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,

@@ -3794,6 +3811,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
+bool f2fs_is_cp_guaranteed(struct page *page);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,

@@ -3857,6 +3875,9 @@ void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
+int f2fs_gc_range(struct f2fs_sb_info *sbi,
+       unsigned int start_seg, unsigned int end_seg,
+       bool dry_run, unsigned int dry_run_sections);
int f2fs_resize_fs(struct file *filp, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);

@@ -4277,7 +4298,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
                bool in_task);
void f2fs_put_page_dic(struct page *page, bool in_task);
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+                       unsigned int ofs_in_node);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);

@@ -4334,7 +4356,8 @@ static inline void f2fs_put_page_dic(struct page *page, bool in_task)
{
    WARN_ON_ONCE(1);
}
-static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
+static inline unsigned int f2fs_cluster_blocks_are_contiguous(
+           struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }

@@ -4391,15 +4414,24 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
    struct f2fs_inode_info *fi = F2FS_I(inode);

-   if (!f2fs_compressed_file(inode))
+   f2fs_down_write(&F2FS_I(inode)->i_sem);
+
+   if (!f2fs_compressed_file(inode)) {
+       f2fs_up_write(&F2FS_I(inode)->i_sem);
        return true;
-   if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+   }
+   if (f2fs_is_mmap_file(inode) ||
+       (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
+       f2fs_up_write(&F2FS_I(inode)->i_sem);
        return false;
+   }

    fi->i_flags &= ~F2FS_COMPR_FL;
    stat_dec_compr_inode(inode);
    clear_inode_flag(inode, FI_COMPRESSED_FILE);
    f2fs_mark_inode_dirty_sync(inode, true);
+
+   f2fs_up_write(&F2FS_I(inode)->i_sem);
    return true;
}

@@ -4502,6 +4534,17 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
    return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

+static inline bool f2fs_valid_pinned_area(struct f2fs_sb_info *sbi,
+                     block_t blkaddr)
+{
+   if (f2fs_sb_has_blkzoned(sbi)) {
+       int devi = f2fs_target_device_index(sbi, blkaddr);
+
+       return !bdev_is_zoned(FDEV(devi).bdev);
+   }
+   return true;
+}
+
static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
{
    return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;

@@ -4603,10 +4646,36 @@ static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
    return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
}

+static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
+                   block_t blkaddr, unsigned int cnt)
+{
+   bool need_submit = false;
+   int i = 0;
+
+   do {
+       struct page *page;
+
+       page = find_get_page(META_MAPPING(sbi), blkaddr + i);
+       if (page) {
+           if (PageWriteback(page))
+               need_submit = true;
+           f2fs_put_page(page, 0);
+       }
+   } while (++i < cnt && !need_submit);
+
+   if (need_submit)
+       f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
+                           NULL, 0, DATA);
+
+   truncate_inode_pages_range(META_MAPPING(sbi),
+           F2FS_BLK_TO_BYTES((loff_t)blkaddr),
+           F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
+}
+
static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
                                block_t blkaddr)
{
-   invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr);
+   f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
    f2fs_invalidate_compress_page(sbi, blkaddr);
}
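Note: the hunks above and below replace open-coded sbi->blocks_per_seg / sbi->segs_per_sec arithmetic with helper macros. The hunk that introduces them is not shown in this extract, so the following is only a sketch inferred from the conversions in this series (shifts by log_blocks_per_seg consistently become SEGS_TO_BLKS/BLKS_TO_SEGS); take the exact definitions as assumptions:

/* Sketch of the segment/block unit helpers used throughout this diff.
 * Inferred from the conversions in this series, not quoted from it. */
#define BLKS_PER_SEG(sbi)       ((sbi)->blocks_per_seg)
#define SEGS_PER_SEC(sbi)       ((sbi)->segs_per_sec)
#define SEGS_TO_BLKS(sbi, segs) ((segs) << (sbi)->log_blocks_per_seg)
#define BLKS_TO_SEGS(sbi, blks) ((blks) >> (sbi)->log_blocks_per_seg)
#define BLKS_PER_SEC(sbi)       (SEGS_TO_BLKS(sbi, SEGS_PER_SEC(sbi)))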
fs/f2fs/file.c (175 changed lines)
@@ -39,6 +39,7 @@
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
    struct inode *inode = file_inode(vmf->vma->vm_file);
+   vm_flags_t flags = vmf->vma->vm_flags;
    vm_fault_t ret;

    ret = filemap_fault(vmf);

@@ -46,7 +47,7 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
        f2fs_update_iostat(F2FS_I_SB(inode), inode,
                    APP_MAPPED_READ_IO, F2FS_BLKSIZE);

-   trace_f2fs_filemap_fault(inode, vmf->pgoff, vmf->vma->vm_flags, ret);
+   trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret);

    return ret;
}

@@ -394,9 +395,20 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
    return f2fs_do_sync_file(file, start, end, datasync, false);
}

-static bool __found_offset(struct address_space *mapping, block_t blkaddr,
-               pgoff_t index, int whence)
+static bool __found_offset(struct address_space *mapping,
+       struct dnode_of_data *dn, pgoff_t index, int whence)
{
+   block_t blkaddr = f2fs_data_blkaddr(dn);
+   struct inode *inode = mapping->host;
+   bool compressed_cluster = false;
+
+   if (f2fs_compressed_file(inode)) {
+       block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+           ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size));
+
+       compressed_cluster = first_blkaddr == COMPRESS_ADDR;
+   }
+
    switch (whence) {
    case SEEK_DATA:
        if (__is_valid_data_blkaddr(blkaddr))

@@ -404,8 +416,12 @@ static bool __found_offset(struct address_space *mapping,
        if (blkaddr == NEW_ADDR &&
            xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
            return true;
+       if (compressed_cluster)
+           return true;
        break;
    case SEEK_HOLE:
+       if (compressed_cluster)
+           return false;
        if (blkaddr == NULL_ADDR)
            return true;
        break;
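Note: the __found_offset() rework above is what makes SEEK_DATA/SEEK_HOLE meaningful on compressed files. A minimal user-space sketch of the resulting behavior (standard lseek(2); the file path is hypothetical):

/* Sketch: probe the data/hole layout of a compressed f2fs file. */
#define _GNU_SOURCE     /* SEEK_DATA / SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/mnt/f2fs/compressed_file", O_RDONLY); /* hypothetical */
    off_t data, hole;

    if (fd < 0)
        return 1;
    data = lseek(fd, 0, SEEK_DATA);    /* now lands on compressed clusters too */
    hole = lseek(fd, data, SEEK_HOLE); /* released clusters report as holes */
    printf("data at %lld, hole at %lld\n", (long long)data, (long long)hole);
    close(fd);
    return 0;
}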
@@ -474,7 +490,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
            goto fail;
        }

-       if (__found_offset(file->f_mapping, blkaddr,
+       if (__found_offset(file->f_mapping, &dn,
                            pgofs, whence)) {
            f2fs_put_dnode(&dn);
            goto found;

@@ -590,8 +606,10 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
        f2fs_set_data_blkaddr(dn, NULL_ADDR);

        if (__is_valid_data_blkaddr(blkaddr)) {
-           if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
-                   DATA_GENERIC_ENHANCE))
+           if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE))
+               continue;
+           if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr,
+                   DATA_GENERIC_ENHANCE))
                continue;
            if (compressed_cluster)
                valid_blocks++;

@@ -818,8 +836,6 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
     */
    if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
        return true;
-   if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
-       return true;
    if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
        return true;

@@ -1192,7 +1208,6 @@ next_dnode:
            !f2fs_is_valid_blkaddr(sbi, *blkaddr,
                    DATA_GENERIC_ENHANCE)) {
            f2fs_put_dnode(&dn);
-           f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
            return -EFSCORRUPTED;
        }

@@ -1478,7 +1493,6 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
        if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
                    DATA_GENERIC_ENHANCE)) {
            ret = -EFSCORRUPTED;
-           f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
            break;
        }

@@ -1662,10 +1676,12 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
    }
    filemap_invalidate_unlock(mapping);
    f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+   if (ret)
+       return ret;

    /* write out all moved pages, if possible */
    filemap_invalidate_lock(mapping);
-   filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
+   ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
    truncate_pagecache(inode, offset);
    filemap_invalidate_unlock(mapping);

@@ -1731,9 +1747,11 @@ next_alloc:

        f2fs_down_write(&sbi->pin_sem);

-       f2fs_lock_op(sbi);
-       f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
-       f2fs_unlock_op(sbi);
+       err = f2fs_allocate_pinning_section(sbi);
+       if (err) {
+           f2fs_up_write(&sbi->pin_sem);
+           goto out_err;
+       }

        map.m_seg_type = CURSEG_COLD_DATA_PINNED;
        err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);

@@ -2066,7 +2084,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)

    inode_lock(inode);

-   if (!f2fs_disable_compressed_file(inode)) {
+   if (!f2fs_disable_compressed_file(inode) ||
+           f2fs_is_pinned_file(inode)) {
        ret = -EINVAL;
        goto out;
    }

@@ -2243,8 +2262,11 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
    case F2FS_GOING_DOWN_METASYNC:
        /* do checkpoint only */
        ret = f2fs_sync_fs(sb, 1);
-       if (ret)
+       if (ret) {
+           if (ret == -EIO)
+               ret = 0;
            goto out;
+       }
        f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
        break;
    case F2FS_GOING_DOWN_NOSYNC:

@@ -2260,6 +2282,8 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
        set_sbi_flag(sbi, SBI_IS_DIRTY);
        /* do checkpoint only */
        ret = f2fs_sync_fs(sb, 1);
+       if (ret == -EIO)
+           ret = 0;
        goto out;
    default:
        ret = -EINVAL;
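Note: the shutdown paths above are driven from user space through the existing F2FS_IOC_SHUTDOWN ioctl; a minimal sketch (the ioctl and flag are real f2fs interfaces, the mount point is hypothetical):

/* Sketch: request a metadata-sync shutdown of a mounted f2fs volume. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h> /* F2FS_IOC_SHUTDOWN, F2FS_GOING_DOWN_METASYNC */

int f2fs_shutdown_metasync(const char *mntpoint)
{
    __u32 flags = F2FS_GOING_DOWN_METASYNC;
    int fd = open(mntpoint, O_RDONLY), ret;

    if (fd < 0)
        return -1;
    /* After the fix above, an -EIO from the checkpoint no longer fails
     * the ioctl; the filesystem is still brought down. */
    ret = ioctl(fd, F2FS_IOC_SHUTDOWN, &flags);
    close(fd);
    return ret;
}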
@@ -2578,7 +2602,6 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
                    .m_may_create = false };
    struct extent_info ei = {};
    pgoff_t pg_start, pg_end, next_pgofs;
-   unsigned int blk_per_seg = sbi->blocks_per_seg;
    unsigned int total = 0, sec_num;
    block_t blk_end = 0;
    bool fragmented = false;

@@ -2687,7 +2710,8 @@ do_map:
        set_inode_flag(inode, FI_SKIP_WRITES);

        idx = map.m_lblk;
-       while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
+       while (idx < map.m_lblk + map.m_len &&
+                       cnt < BLKS_PER_SEG(sbi)) {
            struct page *page;

            page = f2fs_get_lock_data_page(inode, idx, true);

@@ -2707,7 +2731,7 @@ do_map:

        map.m_lblk = idx;
check:
-       if (map.m_lblk < pg_end && cnt < blk_per_seg)
+       if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi))
            goto do_map;

        clear_inode_flag(inode, FI_SKIP_WRITES);

@@ -2976,8 +3000,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)

    if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
            __is_large_section(sbi)) {
-       f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
-             range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
+       f2fs_warn(sbi, "Can't flush %u in %d for SEGS_PER_SEC %u != 1",
+             range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi));
        return -EINVAL;
    }

@@ -3183,6 +3207,7 @@ int f2fs_pin_file_control(struct inode *inode, bool inc)
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
    struct inode *inode = file_inode(filp);
+   struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    __u32 pin;
    int ret = 0;

@@ -3192,7 +3217,7 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
    if (!S_ISREG(inode->i_mode))
        return -EINVAL;

-   if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+   if (f2fs_readonly(sbi->sb))
        return -EROFS;

    ret = mnt_want_write_file(filp);

@@ -3205,9 +3230,18 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
        clear_inode_flag(inode, FI_PIN_FILE);
        f2fs_i_gc_failures_write(inode, 0);
        goto done;
+   } else if (f2fs_is_pinned_file(inode)) {
+       goto done;
    }

-   if (f2fs_should_update_outplace(inode, NULL)) {
+   if (f2fs_sb_has_blkzoned(sbi) && F2FS_HAS_BLOCKS(inode)) {
+       ret = -EFBIG;
+       goto out;
+   }
+
+   /* Let's allow file pinning on zoned device. */
+   if (!f2fs_sb_has_blkzoned(sbi) &&
+           f2fs_should_update_outplace(inode, NULL)) {
        ret = -EINVAL;
        goto out;
    }

@@ -3229,7 +3263,7 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
    set_inode_flag(inode, FI_PIN_FILE);
    ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
-   f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+   f2fs_update_time(sbi, REQ_TIME);
out:
    inode_unlock(inode);
    mnt_drop_write_file(filp);
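Note: user space reaches the pinning logic above through the existing pin-file ioctl; a minimal sketch (F2FS_IOC_SET_PIN_FILE is the real interface, the helper and path are illustrative):

/* Sketch: pin a file so GC will not migrate its blocks. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h> /* F2FS_IOC_SET_PIN_FILE */

int pin_file(const char *path, int pin)
{
    __u32 val = pin;    /* 1 = pin, 0 = unpin */
    int fd = open(path, O_RDWR), ret;

    if (fd < 0)
        return -1;
    /* On zoned devices the file must still be empty: the hunk above
     * returns -EFBIG when F2FS_HAS_BLOCKS(inode) is true. */
    ret = ioctl(fd, F2FS_IOC_SET_PIN_FILE, &val);
    close(fd);
    return ret;
}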
@@ -3438,10 +3472,8 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
        if (!__is_valid_data_blkaddr(blkaddr))
            continue;
        if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
-               DATA_GENERIC_ENHANCE))) {
-           f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+               DATA_GENERIC_ENHANCE)))
            return -EFSCORRUPTED;
-       }
    }

    while (count) {

@@ -3588,10 +3620,10 @@ out:
    return ret;
}

-static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+       unsigned int *reserved_blocks)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-   unsigned int reserved_blocks = 0;
    int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
    block_t blkaddr;
    int i;

@@ -3603,10 +3635,8 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
        if (!__is_valid_data_blkaddr(blkaddr))
            continue;
        if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
-               DATA_GENERIC_ENHANCE))) {
-           f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+               DATA_GENERIC_ENHANCE)))
            return -EFSCORRUPTED;
-       }
    }

    while (count) {

@@ -3614,40 +3644,53 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
        blkcnt_t reserved;
        int ret;

-       for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
-           blkaddr = f2fs_data_blkaddr(dn);
+       for (i = 0; i < cluster_size; i++) {
+           blkaddr = data_blkaddr(dn->inode, dn->node_page,
+                       dn->ofs_in_node + i);

            if (i == 0) {
-               if (blkaddr == COMPRESS_ADDR)
-                   continue;
-               dn->ofs_in_node += cluster_size;
-               goto next;
+               if (blkaddr != COMPRESS_ADDR) {
+                   dn->ofs_in_node += cluster_size;
+                   goto next;
+               }
+               continue;
            }

-           if (__is_valid_data_blkaddr(blkaddr)) {
-               compr_blocks++;
+           /*
+            * compressed cluster was not released due to it
+            * fails in release_compress_blocks(), so NEW_ADDR
+            * is a possible case.
+            */
+           if (blkaddr == NEW_ADDR ||
+               __is_valid_data_blkaddr(blkaddr)) {
+               compr_blocks++;
                continue;
            }
-
-           f2fs_set_data_blkaddr(dn, NEW_ADDR);
        }

        reserved = cluster_size - compr_blocks;
-       ret = inc_valid_block_count(sbi, dn->inode, &reserved);
-       if (ret)
+
+       /* for the case all blocks in cluster were reserved */
+       if (reserved == 1)
+           goto next;
+
+       ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
+       if (unlikely(ret))
            return ret;

        if (reserved != cluster_size - compr_blocks)
            return -ENOSPC;
+
+       for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+           if (f2fs_data_blkaddr(dn) == NULL_ADDR)
+               f2fs_set_data_blkaddr(dn, NEW_ADDR);
+       }

        f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

-       reserved_blocks += reserved;
+       *reserved_blocks += reserved;
next:
        count -= cluster_size;
    }

-   return reserved_blocks;
+   return 0;
}

static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)

@@ -3671,9 +3714,6 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
    if (ret)
        return ret;

-   if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
-       goto out;
-
    f2fs_balance_fs(sbi, true);

    inode_lock(inode);

@@ -3683,6 +3723,9 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        goto unlock_inode;
    }

+   if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
+       goto unlock_inode;
+
    f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
    filemap_invalidate_lock(inode->i_mapping);

@@ -3708,7 +3751,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
        count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
        count = round_up(count, F2FS_I(inode)->i_cluster_size);

-       ret = reserve_compress_blocks(&dn, count);
+       ret = reserve_compress_blocks(&dn, count, &reserved_blocks);

        f2fs_put_dnode(&dn);

@@ -3716,23 +3759,21 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
            break;

        page_idx += count;
-       reserved_blocks += ret;
    }

    filemap_invalidate_unlock(inode->i_mapping);
    f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

-   if (ret >= 0) {
+   if (!ret) {
        clear_inode_flag(inode, FI_COMPRESS_RELEASED);
        inode_set_ctime_current(inode);
        f2fs_mark_inode_dirty_sync(inode, true);
    }
unlock_inode:
    inode_unlock(inode);
out:
    mnt_drop_write_file(filp);

-   if (ret >= 0) {
+   if (!ret) {
        ret = put_user(reserved_blocks, (u64 __user *)arg);
    } else if (reserved_blocks &&
            atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
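Note: the release/reserve pair reworked above is exercised from user space through two existing ioctls; a minimal sketch (both ioctls are real f2fs interfaces, the path is hypothetical):

/* Sketch: release a compressed file's saved blocks, then reserve them back. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h> /* F2FS_IOC_{RELEASE,RESERVE}_COMPRESS_BLOCKS */

int main(void)
{
    __u64 blocks;
    int fd = open("/mnt/f2fs/big.compressed", O_RDWR); /* hypothetical */

    if (fd < 0)
        return 1;
    if (!ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks))
        printf("released %llu blocks\n", (unsigned long long)blocks);
    if (!ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks))
        printf("reserved %llu blocks\n", (unsigned long long)blocks);
    close(fd);
    return 0;
}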
@@ -3877,8 +3918,6 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
                    DATA_GENERIC_ENHANCE)) {
                ret = -EFSCORRUPTED;
                f2fs_put_dnode(&dn);
-               f2fs_handle_error(sbi,
-                       ERROR_INVALID_BLKADDR);
                goto out;
            }

@@ -3981,16 +4020,20 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
            sizeof(option)))
        return -EFAULT;

-   if (!f2fs_compressed_file(inode) ||
-           option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
-           option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
-           option.algorithm >= COMPRESS_MAX)
+   if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+       option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
+       option.algorithm >= COMPRESS_MAX)
        return -EINVAL;

    file_start_write(filp);
    inode_lock(inode);

    f2fs_down_write(&F2FS_I(inode)->i_sem);
+   if (!f2fs_compressed_file(inode)) {
+       ret = -EINVAL;
+       goto out;
+   }
+
    if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
        ret = -EBUSY;
        goto out;
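Note: the per-file compression policy touched above is set with the existing F2FS_IOC_SET_COMPRESS_OPTION ioctl; a minimal sketch (the ioctl and struct f2fs_comp_option are real, the chosen values are only examples):

/* Sketch: switch a compressed file to lz4 with 16-block clusters. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/f2fs.h> /* F2FS_IOC_SET_COMPRESS_OPTION, struct f2fs_comp_option */

int set_lz4_16(const char *path)
{
    struct f2fs_comp_option opt = {
        .algorithm = 1,         /* COMPRESS_LZ4 in fs/f2fs/f2fs.h */
        .log_cluster_size = 4,  /* 2^4 = 16 blocks per cluster */
    };
    int fd = open(path, O_RDWR), ret;

    if (fd < 0)
        return -1;
    /* Per the hunk above: -EINVAL if the file is not compressed,
     * -EBUSY if it is mmapped or has dirty pages. */
    ret = ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
    close(fd);
    return ret;
}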
@@ -4066,7 +4109,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct f2fs_inode_info *fi = F2FS_I(inode);
    pgoff_t page_idx = 0, last_idx;
-   unsigned int blk_per_seg = sbi->blocks_per_seg;
    int cluster_size = fi->i_cluster_size;
    int count, ret;

@@ -4110,7 +4152,7 @@ static int f2fs_ioc_decompress_file(struct file *filp)
        if (ret < 0)
            break;

-       if (get_dirty_pages(inode) >= blk_per_seg) {
+       if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
            ret = filemap_fdatawrite(inode->i_mapping);
            if (ret < 0)
                break;

@@ -4145,7 +4187,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
    struct inode *inode = file_inode(filp);
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    pgoff_t page_idx = 0, last_idx;
-   unsigned int blk_per_seg = sbi->blocks_per_seg;
    int cluster_size = F2FS_I(inode)->i_cluster_size;
    int count, ret;

@@ -4188,7 +4229,7 @@ static int f2fs_ioc_compress_file(struct file *filp)
        if (ret < 0)
            break;

-       if (get_dirty_pages(inode) >= blk_per_seg) {
+       if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) {
            ret = filemap_fdatawrite(inode->i_mapping);
            if (ret < 0)
                break;
fs/f2fs/gc.c (131 changed lines)
@@ -259,7 +259,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
        p->ofs_unit = 1;
    } else {
        p->gc_mode = select_gc_type(sbi, gc_type);
-       p->ofs_unit = sbi->segs_per_sec;
+       p->ofs_unit = SEGS_PER_SEC(sbi);
        if (__is_large_section(sbi)) {
            p->dirty_bitmap = dirty_i->dirty_secmap;
            p->max_search = count_bits(p->dirty_bitmap,

@@ -280,11 +280,11 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
            p->max_search > sbi->max_victim_search)
        p->max_search = sbi->max_victim_search;

-   /* let's select beginning hot/small space first in no_heap mode*/
+   /* let's select beginning hot/small space first. */
    if (f2fs_need_rand_seg(sbi))
-       p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
-   else if (test_opt(sbi, NOHEAP) &&
-       (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
+       p->offset = get_random_u32_below(MAIN_SECS(sbi) *
+                       SEGS_PER_SEC(sbi));
+   else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
        p->offset = 0;
    else
        p->offset = SIT_I(sbi)->last_victim[p->gc_mode];

@@ -295,13 +295,13 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
{
    /* SSR allocates in a segment unit */
    if (p->alloc_mode == SSR)
-       return sbi->blocks_per_seg;
+       return BLKS_PER_SEG(sbi);
    else if (p->alloc_mode == AT_SSR)
        return UINT_MAX;

    /* LFS */
    if (p->gc_mode == GC_GREEDY)
-       return 2 * sbi->blocks_per_seg * p->ofs_unit;
+       return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
    else if (p->gc_mode == GC_CB)
        return UINT_MAX;
    else if (p->gc_mode == GC_AT)

@@ -348,7 +348,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
    mtime = div_u64(mtime, usable_segs_per_sec);
    vblocks = div_u64(vblocks, usable_segs_per_sec);

-   u = (vblocks * 100) >> sbi->log_blocks_per_seg;
+   u = BLKS_TO_SEGS(sbi, vblocks * 100);

    /* Handle if the system time has changed by the user */
    if (mtime < sit_i->min_mtime)

@@ -496,9 +496,9 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
        return;
    }

-   for (i = 0; i < sbi->segs_per_sec; i++)
+   for (i = 0; i < SEGS_PER_SEC(sbi); i++)
        mtime += get_seg_entry(sbi, start + i)->mtime;
-   mtime = div_u64(mtime, sbi->segs_per_sec);
+   mtime = div_u64(mtime, SEGS_PER_SEC(sbi));

    /* Handle if the system time has changed by the user */
    if (mtime < sit_i->min_mtime)

@@ -599,7 +599,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
    unsigned long long age;
    unsigned long long max_mtime = sit_i->dirty_max_mtime;
    unsigned long long min_mtime = sit_i->dirty_min_mtime;
-   unsigned int seg_blocks = sbi->blocks_per_seg;
    unsigned int vblocks;
    unsigned int dirty_threshold = max(am->max_candidate_count,
                    am->candidate_ratio *

@@ -629,7 +628,7 @@ next_node:
    f2fs_bug_on(sbi, !vblocks);

    /* rare case */
-   if (vblocks == seg_blocks)
+   if (vblocks == BLKS_PER_SEG(sbi))
        goto skip_node;

    iter++;

@@ -755,7 +754,7 @@ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
    int ret = 0;

    mutex_lock(&dirty_i->seglist_lock);
-   last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
+   last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

    p.alloc_mode = alloc_mode;
    p.age = age;

@@ -896,7 +895,7 @@ next:
        else
            sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
        sm->last_victim[p.gc_mode] %=
-           (MAIN_SECS(sbi) * sbi->segs_per_sec);
+           (MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
        break;
    }
}

@@ -1184,7 +1183,6 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
        .op_flags = 0,
        .encrypted_page = NULL,
        .in_list = 0,
-       .retry = 0,
    };
    int err;

@@ -1197,7 +1195,6 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
        if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                        DATA_GENERIC_ENHANCE_READ))) {
            err = -EFSCORRUPTED;
-           f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
            goto put_page;
        }
        goto got_it;

@@ -1216,7 +1213,6 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
    if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
                        DATA_GENERIC_ENHANCE))) {
        err = -EFSCORRUPTED;
-       f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
        goto put_page;
    }
got_it:

@@ -1273,7 +1269,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
        .op_flags = 0,
        .encrypted_page = NULL,
        .in_list = 0,
-       .retry = 0,
    };
    struct dnode_of_data dn;
    struct f2fs_summary sum;

@@ -1364,8 +1359,13 @@ static int move_data_block(struct inode *inode, block_t bidx,
    set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

    /* allocate block address */
-   f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+   err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                &sum, type, NULL);
+   if (err) {
+       f2fs_put_page(mpage, 1);
+       /* filesystem should shutdown, no need to recovery block */
+       goto up_out;
+   }

    fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);

@@ -1393,18 +1393,12 @@ static int move_data_block(struct inode *inode, block_t bidx,
    fio.op_flags = REQ_SYNC;
    fio.new_blkaddr = newaddr;
    f2fs_submit_page_write(&fio);
-   if (fio.retry) {
-       err = -EAGAIN;
-       if (PageWriteback(fio.encrypted_page))
-           end_page_writeback(fio.encrypted_page);
-       goto put_page_out;
-   }

    f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

    f2fs_update_data_blkaddr(&dn, newaddr);
    set_inode_flag(inode, FI_APPEND_WRITE);
-put_page_out:
+
    f2fs_put_page(fio.encrypted_page, 1);
recover_block:
    if (err)

@@ -1678,7 +1672,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
    struct f2fs_summary_block *sum;
    struct blk_plug plug;
    unsigned int segno = start_segno;
-   unsigned int end_segno = start_segno + sbi->segs_per_sec;
+   unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
    int seg_freed = 0, migrated = 0;
    unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
                        SUM_TYPE_DATA : SUM_TYPE_NODE;

@@ -1686,7 +1680,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
    int submitted = 0;

    if (__is_large_section(sbi))
-       end_segno = rounddown(end_segno, sbi->segs_per_sec);
+       end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

    /*
     * zone-capacity can be less than zone-size in zoned devices,

@@ -1694,7 +1688,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
     * calculate the end segno in the zone which can be garbage collected
     */
    if (f2fs_sb_has_blkzoned(sbi))
-       end_segno -= sbi->segs_per_sec -
+       end_segno -= SEGS_PER_SEC(sbi) -
                    f2fs_usable_segs_in_sec(sbi, segno);

    sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

@@ -1983,10 +1977,43 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
    init_atgc_management(sbi);
}

-static int free_segment_range(struct f2fs_sb_info *sbi,
-               unsigned int secs, bool gc_only)
+int f2fs_gc_range(struct f2fs_sb_info *sbi,
+       unsigned int start_seg, unsigned int end_seg,
+       bool dry_run, unsigned int dry_run_sections)
+{
+   unsigned int segno;
+   unsigned int gc_secs = dry_run_sections;
+
+   if (unlikely(f2fs_cp_error(sbi)))
+       return -EIO;
+
+   for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
+       struct gc_inode_list gc_list = {
+           .ilist = LIST_HEAD_INIT(gc_list.ilist),
+           .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
+       };
+
+       do_garbage_collect(sbi, segno, &gc_list, FG_GC,
+               dry_run_sections == 0);
+       put_gc_inode(&gc_list);
+
+       if (!dry_run && get_valid_blocks(sbi, segno, true))
+           return -EAGAIN;
+       if (dry_run && dry_run_sections &&
+           !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
+           break;
+
+       if (fatal_signal_pending(current))
+           return -ERESTARTSYS;
+   }
+
+   return 0;
+}
+
+static int free_segment_range(struct f2fs_sb_info *sbi,
+               unsigned int secs, bool dry_run)
{
-   unsigned int segno, next_inuse, start, end;
+   unsigned int next_inuse, start, end;
    struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
    int gc_mode, gc_type;
    int err = 0;

@@ -1994,7 +2021,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,

    /* Force block allocation for GC */
    MAIN_SECS(sbi) -= secs;
-   start = MAIN_SECS(sbi) * sbi->segs_per_sec;
+   start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
    end = MAIN_SEGS(sbi) - 1;

    mutex_lock(&DIRTY_I(sbi)->seglist_lock);

@@ -2008,29 +2035,15 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
    mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

    /* Move out cursegs from the target range */
-   for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
-       f2fs_allocate_segment_for_resize(sbi, type, start, end);
+   for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
+       err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
+       if (err)
+           goto out;
+   }

    /* do GC to move out valid blocks in the range */
-   for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
-       struct gc_inode_list gc_list = {
-           .ilist = LIST_HEAD_INIT(gc_list.ilist),
-           .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
-       };
-
-       do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
-       put_gc_inode(&gc_list);
-
-       if (!gc_only && get_valid_blocks(sbi, segno, true)) {
-           err = -EAGAIN;
-           goto out;
-       }
-       if (fatal_signal_pending(current)) {
-           err = -ERESTARTSYS;
-           goto out;
-       }
-   }
-   if (gc_only)
+   err = f2fs_gc_range(sbi, start, end, dry_run, 0);
+   if (err || dry_run)
        goto out;

    stat_inc_cp_call_count(sbi, TOTAL_CALL);

@@ -2056,7 +2069,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
    int segment_count;
    int segment_count_main;
    long long block_count;
-   int segs = secs * sbi->segs_per_sec;
+   int segs = secs * SEGS_PER_SEC(sbi);

    f2fs_down_write(&sbi->sb_lock);

@@ -2069,7 +2082,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
    raw_sb->segment_count = cpu_to_le32(segment_count + segs);
    raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
    raw_sb->block_count = cpu_to_le64(block_count +
-           (long long)segs * sbi->blocks_per_seg);
+           (long long)SEGS_TO_BLKS(sbi, segs));
    if (f2fs_is_multi_device(sbi)) {
        int last_dev = sbi->s_ndevs - 1;
        int dev_segs =

@@ -2084,8 +2097,8 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
-   int segs = secs * sbi->segs_per_sec;
-   long long blks = (long long)segs * sbi->blocks_per_seg;
+   int segs = secs * SEGS_PER_SEC(sbi);
+   long long blks = SEGS_TO_BLKS(sbi, segs);
    long long user_block_count =
            le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

@@ -2127,7 +2140,7 @@ int f2fs_resize_fs(struct file *filp, __u64 block_count)
        int last_dev = sbi->s_ndevs - 1;
        __u64 last_segs = FDEV(last_dev).total_segments;

-       if (block_count + last_segs * sbi->blocks_per_seg <=
+       if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
                old_block_count)
            return -EINVAL;
    }
fs/f2fs/gc.h

@@ -96,7 +96,7 @@ static inline block_t free_segs_blk_count(struct f2fs_sb_info *sbi)
    if (f2fs_sb_has_blkzoned(sbi))
        return free_segs_blk_count_zoned(sbi);

-   return free_segments(sbi) << sbi->log_blocks_per_seg;
+   return SEGS_TO_BLKS(sbi, free_segments(sbi));
}

static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)

@@ -104,7 +104,7 @@ static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
    block_t free_blks, ovp_blks;

    free_blks = free_segs_blk_count(sbi);
-   ovp_blks = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
+   ovp_blks = SEGS_TO_BLKS(sbi, overprovision_segments(sbi));

    if (free_blks < ovp_blks)
        return 0;
fs/f2fs/namei.c

@@ -851,7 +851,7 @@ out:

static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
            struct file *file, umode_t mode, bool is_whiteout,
-           struct inode **new_inode)
+           struct inode **new_inode, struct f2fs_filename *fname)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct inode *inode;

@@ -879,7 +879,7 @@ static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
    if (err)
        goto out;

-   err = f2fs_do_tmpfile(inode, dir);
+   err = f2fs_do_tmpfile(inode, dir, fname);
    if (err)
        goto release_out;

@@ -930,22 +930,24 @@ static int f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
    if (!f2fs_is_checkpoint_ready(sbi))
        return -ENOSPC;

-   err = __f2fs_tmpfile(idmap, dir, file, mode, false, NULL);
+   err = __f2fs_tmpfile(idmap, dir, file, mode, false, NULL, NULL);

    return finish_open_simple(file, err);
}

static int f2fs_create_whiteout(struct mnt_idmap *idmap,
-               struct inode *dir, struct inode **whiteout)
+               struct inode *dir, struct inode **whiteout,
+               struct f2fs_filename *fname)
{
-   return __f2fs_tmpfile(idmap, dir, NULL,
-               S_IFCHR | WHITEOUT_MODE, true, whiteout);
+   return __f2fs_tmpfile(idmap, dir, NULL, S_IFCHR | WHITEOUT_MODE,
+                       true, whiteout, fname);
}

int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
            struct inode **new_inode)
{
-   return __f2fs_tmpfile(idmap, dir, NULL, S_IFREG, false, new_inode);
+   return __f2fs_tmpfile(idmap, dir, NULL, S_IFREG,
+               false, new_inode, NULL);
}

static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,

@@ -989,7 +991,14 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
    }

    if (flags & RENAME_WHITEOUT) {
-       err = f2fs_create_whiteout(idmap, old_dir, &whiteout);
+       struct f2fs_filename fname;
+
+       err = f2fs_setup_filename(old_dir, &old_dentry->d_name,
+                           0, &fname);
+       if (err)
+           return err;
+
+       err = f2fs_create_whiteout(idmap, old_dir, &whiteout, &fname);
        if (err)
            return err;
    }

@@ -1104,14 +1113,11 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
        iput(whiteout);
    }

-   if (old_is_dir) {
-       if (old_dir_entry)
-           f2fs_set_link(old_inode, old_dir_entry,
-                       old_dir_page, new_dir);
-       else
-           f2fs_put_page(old_dir_page, 0);
+   if (old_dir_entry)
+       f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir);
+   if (old_is_dir)
        f2fs_i_links_write(old_dir, false);
-   }

    if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) {
        f2fs_add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
        if (S_ISDIR(old_inode->i_mode))
fs/f2fs/node.c

@@ -852,21 +852,29 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)

    if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
                    f2fs_sb_has_readonly(sbi)) {
-       unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
+       unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+       unsigned int ofs_in_node = dn->ofs_in_node;
+       pgoff_t fofs = index;
+       unsigned int c_len;
        block_t blkaddr;

+       /* should align fofs and ofs_in_node to cluster_size */
+       if (fofs % cluster_size) {
+           fofs = round_down(fofs, cluster_size);
+           ofs_in_node = round_down(ofs_in_node, cluster_size);
+       }
+
+       c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
        if (!c_len)
            goto out;

-       blkaddr = f2fs_data_blkaddr(dn);
+       blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
        if (blkaddr == COMPRESS_ADDR)
            blkaddr = data_blkaddr(dn->inode, dn->node_page,
-                       dn->ofs_in_node + 1);
+                       ofs_in_node + 1);

        f2fs_update_read_extent_tree_range_compressed(dn->inode,
-                   index, blkaddr,
-                   F2FS_I(dn->inode)->i_cluster_size,
-                   c_len);
+                   fofs, blkaddr, cluster_size, c_len);
    }
out:
    return 0;
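Note: the alignment step added above snaps both offsets down to a cluster boundary before the contiguity check. A quick worked example of that arithmetic, with illustrative numbers only:

/* Sketch: cluster alignment with a 16-block cluster (log_cluster_size = 4). */
#include <stdio.h>

#define ROUND_DOWN(x, y)    ((x) / (y) * (y))

int main(void)
{
    unsigned int cluster_size = 16;
    unsigned long fofs = 21;        /* file offset, in blocks */
    unsigned int ofs_in_node = 5;   /* offset inside the dnode */

    printf("fofs %lu -> %lu, ofs_in_node %u -> %u\n",
        fofs, (unsigned long)ROUND_DOWN(fofs, cluster_size),
        ofs_in_node, ROUND_DOWN(ofs_in_node, cluster_size));
    /* prints: fofs 21 -> 16, ofs_in_node 5 -> 0 */
    return 0;
}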
@@ -1919,7 +1927,7 @@ void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
    for (i = 0; i < nr_folios; i++) {
        struct page *page = &fbatch.folios[i]->page;

-       if (!IS_DNODE(page))
+       if (!IS_INODE(page))
            continue;

        lock_page(page);

@@ -2841,7 +2849,7 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
    int i, idx, last_offset, nrpages;

    /* scan the node segment */
-   last_offset = sbi->blocks_per_seg;
+   last_offset = BLKS_PER_SEG(sbi);
    addr = START_BLOCK(sbi, segno);
    sum_entry = &sum->entries[0];

@@ -3158,7 +3166,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
    if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
        return 0;

-   nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
+   nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
                        nm_i->nat_bits_blocks;
    for (i = 0; i < nm_i->nat_bits_blocks; i++) {
        struct page *page;
fs/f2fs/node.h

@@ -208,10 +208,10 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)

    block_addr = (pgoff_t)(nm_i->nat_blkaddr +
        (block_off << 1) -
-       (block_off & (sbi->blocks_per_seg - 1)));
+       (block_off & (BLKS_PER_SEG(sbi) - 1)));

    if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
-       block_addr += sbi->blocks_per_seg;
+       block_addr += BLKS_PER_SEG(sbi);

    return block_addr;
}
fs/f2fs/recovery.c

@@ -354,7 +354,7 @@ static unsigned int adjust_por_ra_blocks(struct f2fs_sb_info *sbi,
    if (blkaddr + 1 == next_blkaddr)
        ra_blocks = min_t(unsigned int, RECOVERY_MAX_RA_BLOCKS,
                            ra_blocks * 2);
-   else if (next_blkaddr % sbi->blocks_per_seg)
+   else if (next_blkaddr % BLKS_PER_SEG(sbi))
        ra_blocks = max_t(unsigned int, RECOVERY_MIN_RA_BLOCKS,
                            ra_blocks / 2);
    return ra_blocks;

@@ -611,6 +611,19 @@ truncate_out:
    return 0;
}

+static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
+{
+   int i, err = 0;
+
+   for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
+       err = f2fs_reserve_new_block(dn);
+       if (!err)
+           break;
+   }
+
+   return err;
+}
+
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                    struct page *page)
{

@@ -680,14 +693,12 @@ retry_dn:
    if (__is_valid_data_blkaddr(src) &&
        !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
        err = -EFSCORRUPTED;
-       f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
        goto err;
    }

    if (__is_valid_data_blkaddr(dest) &&
        !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
        err = -EFSCORRUPTED;
-       f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
        goto err;
    }

@@ -712,14 +723,8 @@ retry_dn:
     */
    if (dest == NEW_ADDR) {
        f2fs_truncate_data_blocks_range(&dn, 1);
-       do {
-           err = f2fs_reserve_new_block(&dn);
-           if (err == -ENOSPC) {
-               f2fs_bug_on(sbi, 1);
-               break;
-           }
-       } while (err &&
-           IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+
+       err = f2fs_reserve_new_block_retry(&dn);
        if (err)
            goto err;
        continue;

@@ -727,16 +732,8 @@ retry_dn:

    /* dest is valid block, try to recover from src to dest */
    if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
-
        if (src == NULL_ADDR) {
-           do {
-               err = f2fs_reserve_new_block(&dn);
-               if (err == -ENOSPC) {
-                   f2fs_bug_on(sbi, 1);
-                   break;
-               }
-           } while (err &&
-               IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+           err = f2fs_reserve_new_block_retry(&dn);
            if (err)
                goto err;
        }

@@ -756,8 +753,6 @@ retry_prev:
            f2fs_err(sbi, "Inconsistent dest blkaddr:%u, ino:%lu, ofs:%u",
                dest, inode->i_ino, dn.ofs_in_node);
            err = -EFSCORRUPTED;
-           f2fs_handle_error(sbi,
-               ERROR_INVALID_BLKADDR);
            goto err;
        }

@@ -852,7 +847,7 @@ next:
        f2fs_ra_meta_pages_cond(sbi, blkaddr, ra_blocks);
    }
    if (!err)
-       f2fs_allocate_new_segments(sbi);
+       err = f2fs_allocate_new_segments(sbi);
    return err;
}

@@ -864,7 +859,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
    int ret = 0;
    unsigned long s_flags = sbi->sb->s_flags;
    bool need_writecp = false;
-   bool fix_curseg_write_pointer = false;

    if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
        f2fs_info(sbi, "recover fsync data on readonly fs");

@@ -895,8 +889,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
    else
        f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
-   fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
-
    destroy_fsync_dnodes(&inode_list, err);
    destroy_fsync_dnodes(&tmp_inode_list, err);

@@ -914,11 +906,13 @@ skip:
     * and the f2fs is not read only, check and fix zoned block devices'
     * write pointer consistency.
     */
-   if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
-           f2fs_sb_has_blkzoned(sbi)) {
-       err = f2fs_fix_curseg_write_pointer(sbi);
-       if (!err)
-           err = f2fs_check_write_pointer(sbi);
+   if (f2fs_sb_has_blkzoned(sbi) && !f2fs_readonly(sbi->sb)) {
+       int err2 = f2fs_fix_curseg_write_pointer(sbi);
+
+       if (!err2)
+           err2 = f2fs_check_write_pointer(sbi);
+       if (err2)
+           err = err2;
        ret = err;
    }
(File diff suppressed because it is too large.)
fs/f2fs/segment.h

@@ -48,21 +48,21 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,

#define IS_CURSEC(sbi, secno) \
    (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \
-     (sbi)->segs_per_sec) || \
+     SEGS_PER_SEC(sbi)) || \
     ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \
-     (sbi)->segs_per_sec) || \
+     SEGS_PER_SEC(sbi)) || \
     ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \
-     (sbi)->segs_per_sec) || \
+     SEGS_PER_SEC(sbi)) || \
     ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \
-     (sbi)->segs_per_sec) || \
+     SEGS_PER_SEC(sbi)) || \
     ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \
-     (sbi)->segs_per_sec) || \
+     SEGS_PER_SEC(sbi)) || \
     ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
-     (sbi)->segs_per_sec) || \
+     SEGS_PER_SEC(sbi)) || \
     ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \
-     (sbi)->segs_per_sec) || \
+     SEGS_PER_SEC(sbi)) || \
     ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \
-     (sbi)->segs_per_sec))
+     SEGS_PER_SEC(sbi)))

#define MAIN_BLKADDR(sbi) \
    (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \

@@ -77,40 +77,37 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define TOTAL_SEGS(sbi) \
    (SM_I(sbi) ? SM_I(sbi)->segment_count : \
        le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
-#define TOTAL_BLKS(sbi)    (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)
+#define TOTAL_BLKS(sbi)    (SEGS_TO_BLKS(sbi, TOTAL_SEGS(sbi)))

#define MAX_BLKADDR(sbi)   (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)  (1ULL << ((sbi)->log_blocksize + \
                    (sbi)->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)    (SEG0_BLKADDR(sbi) + \
-    (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))
+    (SEGS_TO_BLKS(sbi, GET_R2L_SEGNO(FREE_I(sbi), segno))))

#define NEXT_FREE_BLKADDR(sbi, curseg) \
    (START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)    ((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \
-   (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
+   (BLKS_TO_SEGS(sbi, GET_SEGOFF_FROM_SEG0(sbi, blk_addr)))
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \
-   (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
+   (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1))

#define GET_SEGNO(sbi, blk_addr) \
    ((!__is_valid_data_blkaddr(blk_addr)) ? \
    NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
        GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
-#define BLKS_PER_SEC(sbi) \
-   ((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define CAP_BLKS_PER_SEC(sbi) \
-   ((sbi)->segs_per_sec * (sbi)->blocks_per_seg - \
-    (sbi)->unusable_blocks_per_sec)
+   (BLKS_PER_SEC(sbi) - (sbi)->unusable_blocks_per_sec)
#define CAP_SEGS_PER_SEC(sbi) \
-   ((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
-   (sbi)->log_blocks_per_seg))
+   (SEGS_PER_SEC(sbi) - \
+   BLKS_TO_SEGS(sbi, (sbi)->unusable_blocks_per_sec))
#define GET_SEC_FROM_SEG(sbi, segno) \
-   (((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
+   (((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi))
#define GET_SEG_FROM_SEC(sbi, secno) \
-   ((secno) * (sbi)->segs_per_sec)
+   ((secno) * SEGS_PER_SEC(sbi))
#define GET_ZONE_FROM_SEC(sbi, secno) \
    (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno) \
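Note: a quick numeric check of the rewritten capacity macros, with made-up geometry (512-block segments, i.e. log2 = 9; 64 segments per section; 1024 unusable blocks per zone):

/* Sketch: the arithmetic behind BLKS_PER_SEC / CAP_BLKS_PER_SEC /
 * CAP_SEGS_PER_SEC, using illustrative values only. */
#include <stdio.h>

int main(void)
{
    unsigned int blocks_per_seg = 512, log_blocks_per_seg = 9;
    unsigned int segs_per_sec = 64, unusable_blocks_per_sec = 1024;

    unsigned int blks_per_sec = segs_per_sec * blocks_per_seg;      /* 32768 */
    unsigned int cap_blks = blks_per_sec - unusable_blocks_per_sec; /* 31744 */
    unsigned int cap_segs = segs_per_sec -
            (unusable_blocks_per_sec >> log_blocks_per_seg);        /* 62 */

    printf("BLKS_PER_SEC=%u CAP_BLKS_PER_SEC=%u CAP_SEGS_PER_SEC=%u\n",
            blks_per_sec, cap_blks, cap_segs);
    return 0;
}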
@@ -138,16 +135,6 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
#define SECTOR_TO_BLOCK(sectors) \
    ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)

-/*
- * indicate a block allocation direction: RIGHT and LEFT.
- * RIGHT means allocating new sections towards the end of volume.
- * LEFT means the opposite direction.
- */
-enum {
-   ALLOC_RIGHT = 0,
-   ALLOC_LEFT
-};
-
/*
 * In the victim_sel_policy->alloc_mode, there are three block allocation modes.
 * LFS writes data sequentially with cleaning operations.

@@ -364,7 +351,7 @@ static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
    unsigned int blocks = 0;
    int i;

-   for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
+   for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) {
        struct seg_entry *se = get_seg_entry(sbi, start_segno);

        blocks += se->ckpt_valid_blocks;

@@ -449,7 +436,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
    free_i->free_segments++;

    next = find_next_bit(free_i->free_segmap,
-           start_segno + sbi->segs_per_sec, start_segno);
+           start_segno + SEGS_PER_SEC(sbi), start_segno);
    if (next >= start_segno + usable_segs) {
        clear_bit(secno, free_i->free_secmap);
        free_i->free_sections++;

@@ -485,7 +472,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
        if (!inmem && IS_CURSEC(sbi, secno))
            goto skip_free;
        next = find_next_bit(free_i->free_segmap,
-               start_segno + sbi->segs_per_sec, start_segno);
+               start_segno + SEGS_PER_SEC(sbi), start_segno);
        if (next >= start_segno + usable_segs) {
            if (test_and_clear_bit(secno, free_i->free_secmap))
                free_i->free_sections++;

@@ -573,23 +560,22 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
        unsigned int node_blocks, unsigned int dent_blocks)
{

-   unsigned int segno, left_blocks;
+   unsigned segno, left_blocks;
    int i;

-   /* check current node segment */
+   /* check current node sections in the worst case. */
    for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
        segno = CURSEG_I(sbi, i)->segno;
-       left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
-               get_seg_entry(sbi, segno)->ckpt_valid_blocks;
-
+       left_blocks = CAP_BLKS_PER_SEC(sbi) -
+               get_ckpt_valid_blocks(sbi, segno, true);
        if (node_blocks > left_blocks)
            return false;
    }

-   /* check current data segment */
+   /* check current data section for dentry blocks. */
    segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
-   left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
-           get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+   left_blocks = CAP_BLKS_PER_SEC(sbi) -
+           get_ckpt_valid_blocks(sbi, segno, true);
    if (dent_blocks > left_blocks)
        return false;
    return true;

@@ -638,7 +624,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,

    if (free_secs > upper_secs)
        return false;
-   else if (free_secs <= lower_secs)
+   if (free_secs <= lower_secs)
        return true;
    return !curseg_space;
}

@@ -793,10 +779,10 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
        return -EFSCORRUPTED;
    }

-   if (usable_blks_per_seg < sbi->blocks_per_seg)
+   if (usable_blks_per_seg < BLKS_PER_SEG(sbi))
        f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map,
-               sbi->blocks_per_seg,
-               usable_blks_per_seg) != sbi->blocks_per_seg);
+               BLKS_PER_SEG(sbi),
+               usable_blks_per_seg) != BLKS_PER_SEG(sbi));

    /* check segment usage, and check boundary of a given segment number */
    if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg

@@ -915,9 +901,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
        return 0;

    if (type == DATA)
-       return sbi->blocks_per_seg;
+       return BLKS_PER_SEG(sbi);
    else if (type == NODE)
-       return 8 * sbi->blocks_per_seg;
+       return SEGS_TO_BLKS(sbi, 8);
    else if (type == META)
        return 8 * BIO_MAX_VECS;
    else

@@ -969,3 +955,13 @@ wake_up:
    dcc->discard_wake = true;
    wake_up_interruptible_all(&dcc->discard_wait_queue);
}
+
+static inline unsigned int first_zoned_segno(struct f2fs_sb_info *sbi)
+{
+   int devi;
+
+   for (devi = 0; devi < sbi->s_ndevs; devi++)
+       if (bdev_is_zoned(FDEV(devi).bdev))
+           return GET_SEGNO(sbi, FDEV(devi).start_blk);
+   return 0;
+}
fs/f2fs/super.c (210 changed lines)
@@ -44,24 +44,26 @@ static struct kmem_cache *f2fs_inode_cachep;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 
 const char *f2fs_fault_name[FAULT_MAX] = {
-	[FAULT_KMALLOC]		= "kmalloc",
-	[FAULT_KVMALLOC]	= "kvmalloc",
-	[FAULT_PAGE_ALLOC]	= "page alloc",
-	[FAULT_PAGE_GET]	= "page get",
-	[FAULT_ALLOC_NID]	= "alloc nid",
-	[FAULT_ORPHAN]		= "orphan",
-	[FAULT_BLOCK]		= "no more block",
-	[FAULT_DIR_DEPTH]	= "too big dir depth",
-	[FAULT_EVICT_INODE]	= "evict_inode fail",
-	[FAULT_TRUNCATE]	= "truncate fail",
-	[FAULT_READ_IO]		= "read IO error",
-	[FAULT_CHECKPOINT]	= "checkpoint error",
-	[FAULT_DISCARD]		= "discard error",
-	[FAULT_WRITE_IO]	= "write IO error",
-	[FAULT_SLAB_ALLOC]	= "slab alloc",
-	[FAULT_DQUOT_INIT]	= "dquot initialize",
-	[FAULT_LOCK_OP]		= "lock_op",
-	[FAULT_BLKADDR]		= "invalid blkaddr",
+	[FAULT_KMALLOC]			= "kmalloc",
+	[FAULT_KVMALLOC]		= "kvmalloc",
+	[FAULT_PAGE_ALLOC]		= "page alloc",
+	[FAULT_PAGE_GET]		= "page get",
+	[FAULT_ALLOC_NID]		= "alloc nid",
+	[FAULT_ORPHAN]			= "orphan",
+	[FAULT_BLOCK]			= "no more block",
+	[FAULT_DIR_DEPTH]		= "too big dir depth",
+	[FAULT_EVICT_INODE]		= "evict_inode fail",
+	[FAULT_TRUNCATE]		= "truncate fail",
+	[FAULT_READ_IO]			= "read IO error",
+	[FAULT_CHECKPOINT]		= "checkpoint error",
+	[FAULT_DISCARD]			= "discard error",
+	[FAULT_WRITE_IO]		= "write IO error",
+	[FAULT_SLAB_ALLOC]		= "slab alloc",
+	[FAULT_DQUOT_INIT]		= "dquot initialize",
+	[FAULT_LOCK_OP]			= "lock_op",
+	[FAULT_BLKADDR_VALIDITY]	= "invalid blkaddr",
+	[FAULT_BLKADDR_CONSISTENCE]	= "inconsistent blkaddr",
+	[FAULT_NO_SEGMENT]		= "no free segment",
 };
 
 void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
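
Two of the new fault points split the old FAULT_BLKADDR into a validity check and a consistency check, and FAULT_NO_SEGMENT covers the no-free-segment error path that this series demotes from a kernel panic to a recoverable error. Since f2fs_fault_name[] is indexed by the FAULT_* enum, the table position doubles as the bit used in the fault_type mask; assuming the enum order matches the table above, FAULT_NO_SEGMENT would be bit 19, so a mount such as `-o fault_injection=1,fault_type=0x80000` would exercise only that path.
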
@@ -137,7 +139,6 @@ enum {
 	Opt_resgid,
 	Opt_resuid,
 	Opt_mode,
-	Opt_io_size_bits,
 	Opt_fault_injection,
 	Opt_fault_type,
 	Opt_lazytime,
@@ -216,7 +217,6 @@ static match_table_t f2fs_tokens = {
 	{Opt_resgid, "resgid=%u"},
 	{Opt_resuid, "resuid=%u"},
 	{Opt_mode, "mode=%s"},
-	{Opt_io_size_bits, "io_bits=%u"},
 	{Opt_fault_injection, "fault_injection=%u"},
 	{Opt_fault_type, "fault_type=%u"},
 	{Opt_lazytime, "lazytime"},
@@ -263,7 +263,8 @@ static match_table_t f2fs_tokens = {
 	{Opt_err, NULL},
 };
 
-void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
+void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
+						const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
@@ -274,8 +275,12 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
 	level = printk_get_level(fmt);
 	vaf.fmt = printk_skip_level(fmt);
 	vaf.va = &args;
-	printk("%c%cF2FS-fs (%s): %pV\n",
-	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+	if (limit_rate)
+		printk_ratelimited("%c%cF2FS-fs (%s): %pV\n",
+			KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
+	else
+		printk("%c%cF2FS-fs (%s): %pV\n",
+			KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
 
 	va_end(args);
 }
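
The new bool selects between plain and ratelimited printing, so each per-level wrapper can come in two flavors. A sketch of what the wrapper macros presumably look like after this change (the pre-6.9 f2fs_err() already forwarded to f2fs_printk() this way, minus the bool; the authoritative definitions are in fs/f2fs/f2fs.h):

	/* assumed wrapper shapes; the real macros live in fs/f2fs/f2fs.h */
	#define f2fs_err(sbi, fmt, ...)					\
		f2fs_printk(sbi, false, KERN_ERR fmt, ##__VA_ARGS__)
	#define f2fs_err_ratelimited(sbi, fmt, ...)			\
		f2fs_printk(sbi, true, KERN_ERR fmt, ##__VA_ARGS__)

printk_ratelimited() drops messages once a caller exceeds the default burst, which is what the f2fs_record_stop_reason() and f2fs_record_errors() hunks later in this file rely on so that a persistently failing f2fs_commit_super() cannot flood the console.
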
@@ -343,46 +348,6 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
 					  F2FS_OPTION(sbi).s_resgid));
 }
 
-static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
-{
-	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
-	unsigned int avg_vblocks;
-	unsigned int wanted_reserved_segments;
-	block_t avail_user_block_count;
-
-	if (!F2FS_IO_ALIGNED(sbi))
-		return 0;
-
-	/* average valid block count in section in worst case */
-	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);
-
-	/*
-	 * we need enough free space when migrating one section in worst case
-	 */
-	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
-						reserved_segments(sbi);
-	wanted_reserved_segments -= reserved_segments(sbi);
-
-	avail_user_block_count = sbi->user_block_count -
-				sbi->current_reserved_blocks -
-				F2FS_OPTION(sbi).root_reserved_blocks;
-
-	if (wanted_reserved_segments * sbi->blocks_per_seg >
-					avail_user_block_count) {
-		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
-			wanted_reserved_segments,
-			avail_user_block_count >> sbi->log_blocks_per_seg);
-		return -ENOSPC;
-	}
-
-	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;
-
-	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
-		 wanted_reserved_segments);
-
-	return 0;
-}
-
 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
 {
 	if (!F2FS_OPTION(sbi).unusable_cap_perc)
@@ -663,7 +628,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
 #ifdef CONFIG_F2FS_FS_ZSTD
 static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
 {
-	unsigned int level;
+	int level;
 	int len = 4;
 
 	if (strlen(str) == len) {
@@ -677,9 +642,15 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
 		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
 		return -EINVAL;
 	}
-	if (kstrtouint(str + 1, 10, &level))
+	if (kstrtoint(str + 1, 10, &level))
 		return -EINVAL;
 
+	/* f2fs does not support negative compress level now */
+	if (level < 0) {
+		f2fs_info(sbi, "do not support negative compress level: %d", level);
+		return -ERANGE;
+	}
+
 	if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
 		f2fs_info(sbi, "invalid zstd compress level: %d", level);
 		return -EINVAL;
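
kstrtouint() rejected a leading '-' as a generic parse error, so "zstd:-1" failed with an uninformative -EINVAL; switching to kstrtoint() accepts the string and lets the code refuse negative levels deliberately, with -ERANGE and a clear message. A userspace sketch of the new flow, with strtol() standing in for kstrtoint():

	/* userspace sketch of the new parse flow, not the kernel code itself */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	static int parse_zstd_level(const char *s)
	{
		char *end;
		long level = strtol(s, &end, 10);

		if (*end != '\0')
			return -EINVAL;	/* kstrtoint() would also reject this */
		if (level < 0)
			return -ERANGE;	/* new: negative levels refused explicitly */
		return (int)level;
	}

	int main(void)
	{
		printf("%d\n", parse_zstd_level("6"));	/* 6 */
		printf("%d\n", parse_zstd_level("-1"));	/* -ERANGE */
		return 0;
	}
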
@@ -763,10 +734,8 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			clear_opt(sbi, DISCARD);
 			break;
 		case Opt_noheap:
-			set_opt(sbi, NOHEAP);
-			break;
 		case Opt_heap:
-			clear_opt(sbi, NOHEAP);
+			f2fs_warn(sbi, "heap/no_heap options were deprecated");
 			break;
 #ifdef CONFIG_F2FS_FS_XATTR
 		case Opt_user_xattr:
@@ -913,16 +882,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			}
 			kfree(name);
 			break;
-		case Opt_io_size_bits:
-			if (args->from && match_int(args, &arg))
-				return -EINVAL;
-			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
-				f2fs_warn(sbi, "Not support %ld, larger than %d",
-					BIT(arg), BIO_MAX_VECS);
-				return -EINVAL;
-			}
-			F2FS_OPTION(sbi).write_io_size_bits = arg;
-			break;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 		case Opt_fault_injection:
 			if (args->from && match_int(args, &arg))
@@ -1392,12 +1351,6 @@ default_check:
 	}
 #endif
 
-	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
-		f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO",
-			 F2FS_IO_SIZE_KB(sbi));
-		return -EINVAL;
-	}
-
 	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
 		int min_size, max_size;
 
@@ -1718,7 +1671,6 @@ static void f2fs_put_super(struct super_block *sb)
 
 	f2fs_destroy_page_array_cache(sbi);
 	f2fs_destroy_xattr_caches(sbi);
-	mempool_destroy(sbi->write_io_dummy);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
 		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
@@ -2009,10 +1961,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 	} else {
 		seq_puts(seq, ",nodiscard");
 	}
-	if (test_opt(sbi, NOHEAP))
-		seq_puts(seq, ",no_heap");
-	else
-		seq_puts(seq, ",heap");
 #ifdef CONFIG_F2FS_FS_XATTR
 	if (test_opt(sbi, XATTR_USER))
 		seq_puts(seq, ",user_xattr");
@@ -2078,9 +2026,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 					F2FS_OPTION(sbi).s_resuid),
 				from_kgid_munged(&init_user_ns,
 					F2FS_OPTION(sbi).s_resgid));
-	if (F2FS_IO_SIZE_BITS(sbi))
-		seq_printf(seq, ",io_bits=%u",
-				F2FS_OPTION(sbi).write_io_size_bits);
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 	if (test_opt(sbi, FAULT_INJECTION)) {
 		seq_printf(seq, ",fault_injection=%u",
@@ -2192,7 +2137,6 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
 	set_opt(sbi, INLINE_XATTR);
 	set_opt(sbi, INLINE_DATA);
 	set_opt(sbi, INLINE_DENTRY);
-	set_opt(sbi, NOHEAP);
 	set_opt(sbi, MERGE_CHECKPOINT);
 	F2FS_OPTION(sbi).unusable_cap = 0;
 	sbi->sb->s_flags |= SB_LAZYTIME;
@@ -2247,6 +2191,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 		.init_gc_type = FG_GC,
 		.should_migrate_blocks = false,
 		.err_gc_skipped = true,
+		.no_bg_gc = true,
 		.nr_free_secs = 1 };
 
 	f2fs_down_write(&sbi->gc_lock);
@@ -2332,7 +2277,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
 	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
 	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
-	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
 	bool no_atgc = !test_opt(sbi, ATGC);
 	bool no_discard = !test_opt(sbi, DISCARD);
 	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
@@ -2440,12 +2384,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 		goto restore_opts;
 	}
 
-	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
-		err = -EINVAL;
-		f2fs_warn(sbi, "switch io_bits option is not allowed");
-		goto restore_opts;
-	}
-
 	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
 		err = -EINVAL;
 		f2fs_warn(sbi, "switch compress_cache option is not allowed");
@@ -3706,7 +3644,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 	}
 
 	main_segs = le32_to_cpu(raw_super->segment_count_main);
-	blocks_per_seg = sbi->blocks_per_seg;
+	blocks_per_seg = BLKS_PER_SEG(sbi);
 
 	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
 		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
@@ -3818,9 +3756,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
 	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
 	sbi->total_sections = le32_to_cpu(raw_super->section_count);
-	sbi->total_node_count =
-		(le32_to_cpu(raw_super->segment_count_nat) / 2)
-			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
+	sbi->total_node_count = SEGS_TO_BLKS(sbi,
+			((le32_to_cpu(raw_super->segment_count_nat) / 2) *
+			NAT_ENTRY_PER_BLOCK));
 	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
 	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
 	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
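
The rewritten total_node_count expression computes the same value as before: half of the NAT segments hold the live copy (the other half is the mirror), each NAT block describes NAT_ENTRY_PER_BLOCK nodes, and SEGS_TO_BLKS() scales segments to blocks. For example, with the standard 2 MB segment (512 blocks of 4 KiB) and segment_count_nat = 4, that is 2 * 512 * 455 = 465,920 addressable node IDs, taking NAT_ENTRY_PER_BLOCK as 455 (an assumption based on the 9-byte on-disk NAT entry: 4096 / 9 rounds down to 455).
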
@@ -3829,7 +3767,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
 	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
 	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
-	sbi->migration_granularity = sbi->segs_per_sec;
+	sbi->migration_granularity = SEGS_PER_SEC(sbi);
 	sbi->seq_file_ra_mul = MIN_RA_MUL;
 	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
 	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
@@ -3930,11 +3868,6 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 		return 0;
 
 	zone_sectors = bdev_zone_sectors(bdev);
-	if (!is_power_of_2(zone_sectors)) {
-		f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
-		return -EINVAL;
-	}
-
 	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
 				SECTOR_TO_BLOCK(zone_sectors))
 		return -EINVAL;
@@ -4090,7 +4023,9 @@ static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
 
 	f2fs_up_write(&sbi->sb_lock);
 	if (err)
-		f2fs_err(sbi, "f2fs_commit_super fails to record err:%d", err);
+		f2fs_err_ratelimited(sbi,
+			"f2fs_commit_super fails to record stop_reason, err:%d",
+			err);
 }
 
 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
@@ -4133,8 +4068,9 @@ static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
 
 	err = f2fs_commit_super(sbi, false);
 	if (err)
-		f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d",
-							error, err);
+		f2fs_err_ratelimited(sbi,
+			"f2fs_commit_super fails to record errors:%u, err:%d",
+			error, err);
 out_unlock:
 	f2fs_up_write(&sbi->sb_lock);
 }
@@ -4259,14 +4195,14 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
 		if (i == 0) {
 			FDEV(i).start_blk = 0;
 			FDEV(i).end_blk = FDEV(i).start_blk +
-				(FDEV(i).total_segments <<
-				sbi->log_blocks_per_seg) - 1 +
-				le32_to_cpu(raw_super->segment0_blkaddr);
+				SEGS_TO_BLKS(sbi,
+				FDEV(i).total_segments) - 1 +
+				le32_to_cpu(raw_super->segment0_blkaddr);
 		} else {
 			FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
 			FDEV(i).end_blk = FDEV(i).start_blk +
-				(FDEV(i).total_segments <<
-				sbi->log_blocks_per_seg) - 1;
+				SEGS_TO_BLKS(sbi,
+				FDEV(i).total_segments) - 1;
 			FDEV(i).bdev_file = bdev_file_open_by_path(
 				FDEV(i).path, mode, sbi->sb, NULL);
 		}
@@ -4305,8 +4241,6 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
 			  FDEV(i).total_segments,
 			  FDEV(i).start_blk, FDEV(i).end_blk);
 	}
-	f2fs_info(sbi,
-		  "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi));
 	return 0;
 }
 
@@ -4519,19 +4453,10 @@ try_onemore:
 	if (err)
 		goto free_iostat;
 
-	if (F2FS_IO_ALIGNED(sbi)) {
-		sbi->write_io_dummy =
-			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
-		if (!sbi->write_io_dummy) {
-			err = -ENOMEM;
-			goto free_percpu;
-		}
-	}
-
 	/* init per sbi slab cache */
 	err = f2fs_init_xattr_caches(sbi);
 	if (err)
-		goto free_io_dummy;
+		goto free_percpu;
 	err = f2fs_init_page_array_cache(sbi);
 	if (err)
 		goto free_xattr_cache;
@@ -4619,10 +4544,6 @@ try_onemore:
 		goto free_nm;
 	}
 
-	err = adjust_reserved_segment(sbi);
-	if (err)
-		goto free_nm;
-
 	/* For write statistics */
 	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
 
@@ -4749,13 +4670,20 @@ reset_checkpoint:
 	 * If the f2fs is not readonly and fsync data recovery succeeds,
 	 * check zoned block devices' write pointer consistency.
 	 */
-	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
-		err = f2fs_check_write_pointer(sbi);
-		if (err)
-			goto free_meta;
-	}
-
-	f2fs_init_inmem_curseg(sbi);
+	if (f2fs_sb_has_blkzoned(sbi) && !f2fs_readonly(sb)) {
+		int err2;
+
+		f2fs_notice(sbi, "Checking entire write pointers");
+		err2 = f2fs_check_write_pointer(sbi);
+		if (err2)
+			err = err2;
+	}
+	if (err)
+		goto free_meta;
+
+	err = f2fs_init_inmem_curseg(sbi);
+	if (err)
+		goto sync_free_meta;
 
 	/* f2fs_recover_fsync_data() cleared this already */
 	clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -4854,8 +4782,6 @@ free_page_array_cache:
 	f2fs_destroy_page_array_cache(sbi);
 free_xattr_cache:
 	f2fs_destroy_xattr_caches(sbi);
-free_io_dummy:
-	mempool_destroy(sbi->write_io_dummy);
free_percpu:
 	destroy_percpu_info(sbi);
free_iostat:
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -493,8 +493,8 @@ out:
 		spin_lock(&sbi->stat_lock);
 		if (t > (unsigned long)(sbi->user_block_count -
 				F2FS_OPTION(sbi).root_reserved_blocks -
-				sbi->blocks_per_seg *
-				SM_I(sbi)->additional_reserved_segments)) {
+				SEGS_TO_BLKS(sbi,
+					SM_I(sbi)->additional_reserved_segments))) {
 			spin_unlock(&sbi->stat_lock);
 			return -EINVAL;
 		}
@@ -551,7 +551,7 @@ out:
 	}
 
 	if (!strcmp(a->attr.name, "migration_granularity")) {
-		if (t == 0 || t > sbi->segs_per_sec)
+		if (t == 0 || t > SEGS_PER_SEC(sbi))
 			return -EINVAL;
 	}
 
@@ -1492,6 +1492,50 @@ static int __maybe_unused discard_plist_seq_show(struct seq_file *seq,
 	return 0;
 }
 
+static int __maybe_unused disk_map_seq_show(struct seq_file *seq,
+						void *offset)
+{
+	struct super_block *sb = seq->private;
+	struct f2fs_sb_info *sbi = F2FS_SB(sb);
+	int i;
+
+	seq_printf(seq, "Address Layout   : %5luB Block address (# of Segments)\n",
+					F2FS_BLKSIZE);
+	seq_printf(seq, " SB            : %12s\n", "0/1024B");
+	seq_printf(seq, " seg0_blkaddr  : 0x%010x\n", SEG0_BLKADDR(sbi));
+	seq_printf(seq, " Checkpoint    : 0x%010x (%10d)\n",
+			le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr), 2);
+	seq_printf(seq, " SIT           : 0x%010x (%10d)\n",
+			SIT_I(sbi)->sit_base_addr,
+			le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_sit));
+	seq_printf(seq, " NAT           : 0x%010x (%10d)\n",
+			NM_I(sbi)->nat_blkaddr,
+			le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_nat));
+	seq_printf(seq, " SSA           : 0x%010x (%10d)\n",
+			SM_I(sbi)->ssa_blkaddr,
+			le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_ssa));
+	seq_printf(seq, " Main          : 0x%010x (%10d)\n",
+			SM_I(sbi)->main_blkaddr,
+			le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main));
+	seq_printf(seq, " # of Sections : %12d\n",
+			le32_to_cpu(F2FS_RAW_SUPER(sbi)->section_count));
+	seq_printf(seq, " Segs/Sections : %12d\n",
+			SEGS_PER_SEC(sbi));
+	seq_printf(seq, " Section size  : %12d MB\n",
+			SEGS_PER_SEC(sbi) << 1);
+
+	if (!f2fs_is_multi_device(sbi))
+		return 0;
+
+	seq_puts(seq, "\nDisk Map for multi devices:\n");
+	for (i = 0; i < sbi->s_ndevs; i++)
+		seq_printf(seq, "Disk:%2d (zoned=%d): 0x%010x - 0x%010x on %s\n",
+			i, bdev_is_zoned(FDEV(i).bdev),
+			FDEV(i).start_blk, FDEV(i).end_blk,
+			FDEV(i).path);
+	return 0;
+}
+
 int __init f2fs_init_sysfs(void)
 {
 	int ret;
@@ -1573,6 +1617,8 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 				victim_bits_seq_show, sb);
 		proc_create_single_data("discard_plist_info", 0444, sbi->s_proc,
 				discard_plist_seq_show, sb);
+		proc_create_single_data("disk_map", 0444, sbi->s_proc,
+				disk_map_seq_show, sb);
 	return 0;
put_feature_list_kobj:
 	kobject_put(&sbi->s_feature_list_kobj);
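
These two hunks implement the "proc entry to show the entire disk layout" item from the changelog: once a volume is mounted, /proc/fs/f2fs/<dev>/disk_map prints the start block address and segment count of each metadata region (superblock, checkpoint, SIT, NAT, SSA) and the main area, plus the section geometry and, on multi-device volumes, a per-device block-range map with a zoned flag. The section-size line works because an f2fs segment is fixed at 2 MB, so SEGS_PER_SEC(sbi) << 1 is the section size in MB.
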
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -258,21 +258,23 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
 					       pgoff_t index,
 					       unsigned long num_ra_pages)
 {
-	struct page *page;
+	struct folio *folio;
 
 	index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT;
 
-	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
-	if (!page || !PageUptodate(page)) {
+	folio = __filemap_get_folio(inode->i_mapping, index, FGP_ACCESSED, 0);
+	if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
 		DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index);
 
-		if (page)
-			put_page(page);
+		if (!IS_ERR(folio))
+			folio_put(folio);
 		else if (num_ra_pages > 1)
 			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
-		page = read_mapping_page(inode->i_mapping, index, NULL);
+		folio = read_mapping_folio(inode->i_mapping, index, NULL);
+		if (IS_ERR(folio))
+			return ERR_CAST(folio);
 	}
-	return page;
+	return folio_file_page(folio, index);
 }
 
 static int f2fs_write_merkle_tree_block(struct inode *inode, const void *buf,
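
This is a routine page-cache folio conversion; the one behavioural trap is that find_get_page_flags() signalled a cache miss with NULL, while __filemap_get_folio() returns an ERR_PTR(), so every test flips from NULL checks to IS_ERR(). The distilled shape of the pattern, as a sketch with the f2fs-specific readahead elided:

	/* distilled shape of the conversion above; readahead details elided */
	#include <linux/err.h>
	#include <linux/pagemap.h>

	static struct page *read_metadata_page(struct address_space *mapping,
					       pgoff_t index)
	{
		struct folio *folio;

		folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
		if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
			if (!IS_ERR(folio))
				folio_put(folio);	/* stale copy: drop our ref */
			folio = read_mapping_folio(mapping, index, NULL);
			if (IS_ERR(folio))
				return ERR_CAST(folio);
		}
		/* callers still want a page; pick the one covering this index */
		return folio_file_page(folio, index);
	}
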
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -27,6 +27,7 @@
 
 #define F2FS_BYTES_TO_BLK(bytes)	((bytes) >> F2FS_BLKSIZE_BITS)
 #define F2FS_BLK_TO_BYTES(blk)		((blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_END_BYTES(blk)		(F2FS_BLK_TO_BYTES(blk + 1) - 1)
 
 /* 0, 1(node nid), 2(meta nid) are reserved node id */
 #define F2FS_RESERVED_NODE_NUM		3
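
F2FS_BLK_END_BYTES() yields the offset of the last byte covered by a block, presumably for converting block ranges to byte ranges in paths like the new compressed-file SEEK_DATA/SEEK_HOLE support. A self-contained check of the arithmetic with a hard-coded 4 KiB block (the EX_* names are local to the example):

	/* mirrors the new macro with a hard-coded 4 KiB block size */
	#define EX_BLKSIZE_BITS		12
	#define EX_BLK_TO_BYTES(blk)	((blk) << EX_BLKSIZE_BITS)
	#define EX_BLK_END_BYTES(blk)	(EX_BLK_TO_BYTES(blk + 1) - 1)

	_Static_assert(EX_BLK_TO_BYTES(2) == 8192, "first byte of block 2");
	_Static_assert(EX_BLK_END_BYTES(2) == 12287, "last byte of block 2");
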
@@ -40,12 +41,6 @@
 
 #define F2FS_ENC_UTF8_12_1	1
 
-#define F2FS_IO_SIZE(sbi)	BIT(F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi)	BIT(F2FS_OPTION(sbi).write_io_size_bits + 2) /* KB */
-#define F2FS_IO_SIZE_BITS(sbi)	(F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
-#define F2FS_IO_SIZE_MASK(sbi)	(F2FS_IO_SIZE(sbi) - 1)
-#define F2FS_IO_ALIGNED(sbi)	(F2FS_IO_SIZE(sbi) > 1)
-
 /* This flag is used by node and meta inodes, and by recovery */
 #define GFP_F2FS_ZERO		(GFP_NOFS | __GFP_ZERO)
 
@@ -81,6 +76,7 @@ enum stop_cp_reason {
 	STOP_CP_REASON_CORRUPTED_SUMMARY,
 	STOP_CP_REASON_UPDATE_INODE,
 	STOP_CP_REASON_FLUSH_FAIL,
+	STOP_CP_REASON_NO_SEGMENT,
 	STOP_CP_REASON_MAX,
 };