Merge branch 'dev' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
commit 94e453ddc5
@@ -579,6 +579,12 @@ Description:	When ATGC is on, it controls age threshold to bypass GCing young
		candidates whose age is not beyond the threshold, by default it was
		initialized as 604800 seconds (equals to 7 days).

What:		/sys/fs/f2fs/<disk>/atgc_enabled
Date:		Feb 2024
Contact:	"Jinbao Liu" <liujinbao1@xiaomi.com>
Description:	It represents whether ATGC is on or off. The value is 1 which
		indicates that ATGC is on, and 0 indicates that it is off.

What:		/sys/fs/f2fs/<disk>/gc_reclaimed_segments
Date:		July 2021
Contact:	"Daeho Jeong" <daehojeong@google.com>
@@ -763,3 +769,17 @@ Date:		November 2023
Contact:	"Chao Yu" <chao@kernel.org>
Description:	It controls to enable/disable IO aware feature for background discard.
		By default, the value is 1 which indicates IO aware is on.

What:		/sys/fs/f2fs/<disk>/blkzone_alloc_policy
Date:		July 2024
Contact:	"Yuanhong Liao" <liaoyuanhong@vivo.com>
Description:	The zone UFS we are currently using consists of two parts:
		conventional zones and sequential zones. It can be used to control which part
		to prioritize for writes, with a default value of 0.

		========================  =========================================
		value                     description
		blkzone_alloc_policy = 0  Prioritize writing to sequential zones
		blkzone_alloc_policy = 1  Only allow writing to sequential zones
		blkzone_alloc_policy = 2  Prioritize writing to conventional zones
		========================  =========================================
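For reference, the knob documented above is an ordinary sysfs attribute. Below is a minimal userspace sketch (not part of the patch); the device name "sda" and the lack of real error handling are illustrative assumptions, and the actual <disk> directory name must be substituted.

```c
/* Illustrative only: read and update blkzone_alloc_policy via sysfs.
 * "sda" is a placeholder for the real <disk> entry under /sys/fs/f2fs/. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/f2fs/sda/blkzone_alloc_policy";
	FILE *f = fopen(path, "r+");
	int policy;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &policy) == 1)
		printf("current policy: %d\n", policy);

	rewind(f);
	fprintf(f, "2\n");	/* 2 == prioritize conventional zones */
	fclose(f);
	return 0;
}
```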
@@ -7,7 +7,6 @@
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
@@ -1939,7 +1938,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,

	inode_lock_shared(inode);

	maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
	maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
	if (start > maxbytes) {
		ret = -EFBIG;
		goto out;
@@ -2064,7 +2063,7 @@ out:
static inline loff_t f2fs_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;
		return F2FS_BLK_TO_BYTES(max_file_blocks(inode));

	return i_size_read(inode);
}
@@ -2650,10 +2649,13 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
	struct dnode_of_data dn;
	struct node_info ni;
	bool ipu_force = false;
	bool atomic_commit;
	int err = 0;

	/* Use COW inode to make dnode_of_data for atomic write */
	if (f2fs_is_atomic_file(inode))
	atomic_commit = f2fs_is_atomic_file(inode) &&
				page_private_atomic(fio->page);
	if (atomic_commit)
		set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
	else
		set_new_dnode(&dn, inode, NULL, NULL, 0);
@@ -2752,6 +2754,8 @@ got_it:
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page_folio(page), OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (atomic_commit)
		clear_page_private_atomic(page);
out_writepage:
	f2fs_put_dnode(&dn);
out:
@@ -3721,6 +3725,9 @@ static int f2fs_write_end(struct file *file,

	set_page_dirty(page);

	if (f2fs_is_atomic_file(inode))
		set_page_private_atomic(page);

	if (pos + copied > i_size_read(inode) &&
	    !f2fs_verity_in_progress(inode)) {
		f2fs_i_size_write(inode, pos + copied);
@@ -166,7 +166,8 @@ static unsigned long dir_block_index(unsigned int level,
	unsigned long bidx = 0;

	for (i = 0; i < level; i++)
		bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
		bidx += mul_u32_u32(dir_buckets(i, dir_level),
				    bucket_blocks(i));
	bidx += idx * bucket_blocks(level);
	return bidx;
}
@@ -366,7 +366,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
	if (fofs < (pgoff_t)et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
@@ -456,7 +456,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,

	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
			(pgoff_t)et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
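The two extent_cache.c hunks above only add (pgoff_t) casts, which widen the 32-bit fofs field before the addition so that fofs + len cannot wrap around. A small self-contained sketch of the failure mode being guarded against (the values below are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fofs = UINT32_MAX - 15;	/* extent start near the 32-bit limit */
	uint32_t len = 32;

	/* 32-bit arithmetic wraps around ... */
	printf("u32 sum:  %u\n", fofs + len);

	/* ... while widening first, as the (pgoff_t) cast does on 64-bit
	 * kernels, keeps the boundary comparison meaningful. */
	printf("wide sum: %llu\n", (unsigned long long)fofs + len);
	return 0;
}
```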
@@ -11,7 +11,6 @@
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
@@ -134,6 +133,12 @@ typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16

enum blkzone_allocation_policy {
	BLKZONE_ALLOC_PRIOR_SEQ,	/* Prioritize writing to sequential zones */
	BLKZONE_ALLOC_ONLY_SEQ,		/* Only allow writing to sequential zones */
	BLKZONE_ALLOC_PRIOR_CONV,	/* Prioritize writing to conventional zones */
};

/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read lock
@@ -285,6 +290,7 @@ enum {
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	XATTR_DIR_INO,		/* for xattr updated dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};
@@ -1155,6 +1161,7 @@ enum cp_reason_type {
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
	CP_XATTR_DIR,
};

enum iostat_type {
@@ -1418,7 +1425,8 @@ static inline void f2fs_clear_bit(unsigned int nr, char *addr);
 * bit 1	PAGE_PRIVATE_ONGOING_MIGRATION
 * bit 2	PAGE_PRIVATE_INLINE_INODE
 * bit 3	PAGE_PRIVATE_REF_RESOURCE
 * bit 4-	f2fs private data
 * bit 4	PAGE_PRIVATE_ATOMIC_WRITE
 * bit 5-	f2fs private data
 *
 * Layout B: lowest bit should be 0
 * page.private is a wrapped pointer.
@@ -1428,6 +1436,7 @@ enum {
	PAGE_PRIVATE_ONGOING_MIGRATION,	/* data page which is on-going migrating */
	PAGE_PRIVATE_INLINE_INODE,	/* inode page contains inline data */
	PAGE_PRIVATE_REF_RESOURCE,	/* dirty page has referenced resources */
	PAGE_PRIVATE_ATOMIC_WRITE,	/* data page from atomic write path */
	PAGE_PRIVATE_MAX
};

@@ -1559,6 +1568,8 @@ struct f2fs_sb_info {
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;	/* F2FS blocks per zone */
	unsigned int max_open_zones;	/* max open zone resources of the zoned device */
	/* For adjust the priority writing position of data in zone UFS */
	unsigned int blkzone_alloc_policy;
#endif

	/* for node-related operations */
@@ -1994,6 +2005,16 @@ static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_super_block *F2FS_SUPER_BLOCK(struct folio *folio,
						pgoff_t index)
{
	pgoff_t idx_in_folio = index % (1 << folio_order(folio));

	return (struct f2fs_super_block *)
		(page_address(folio_page(folio, idx_in_folio)) +
		F2FS_SUPER_OFFSET);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
@@ -2396,14 +2417,17 @@ static inline void clear_page_private_##name(struct page *page) \
PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_GET_FUNC(atomic, ATOMIC_WRITE);

PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_SET_FUNC(atomic, ATOMIC_WRITE);

PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
@@ -2435,6 +2459,7 @@ static inline void clear_page_private_all(struct page *page)
	clear_page_private_reference(page);
	clear_page_private_gcing(page);
	clear_page_private_inline(page);
	clear_page_private_atomic(page);

	f2fs_bug_on(F2FS_P_SB(page), page_private(page));
}
@@ -2900,26 +2925,27 @@ static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
}

static inline int f2fs_has_extra_attr(struct inode *inode);
static inline unsigned int get_dnode_base(struct inode *inode,
					struct page *node_page)
{
	if (!IS_INODE(node_page))
		return 0;

	return inode ? get_extra_isize(inode) :
			offset_in_addr(&F2FS_NODE(node_page)->i);
}

static inline __le32 *get_dnode_addr(struct inode *inode,
					struct page *node_page)
{
	return blkaddr_in_node(F2FS_NODE(node_page)) +
			get_dnode_base(inode, node_page);
}

static inline block_t data_blkaddr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
	return le32_to_cpu(*(get_dnode_addr(inode, node_page) + offset));
}

static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
@@ -3292,8 +3318,6 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
	return is_inode_flag_set(inode, FI_COW_FILE);
}

static inline __le32 *get_dnode_addr(struct inode *inode,
					struct page *node_page);
static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	__le32 *addr = get_dnode_addr(inode, page);
@@ -3432,17 +3456,6 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
	return F2FS_I(inode)->i_inline_xattr_size;
}

static inline __le32 *get_dnode_addr(struct inode *inode,
					struct page *node_page)
{
	int base = 0;

	if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
		base = get_extra_isize(inode);

	return blkaddr_in_node(F2FS_NODE(node_page)) + base;
}

#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -3987,7 +4000,7 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)

#define stat_inc_cp_call_count(sbi, foreground) \
		atomic_inc(&sbi->cp_call_count[(foreground)])
#define stat_inc_cp_count(si)		(F2FS_STAT(sbi)->cp_count++)
#define stat_inc_cp_count(sbi)		(F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
@@ -218,6 +217,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;
	else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							XATTR_DIR_INO))
		cp_reason = CP_XATTR_DIR;

	return cp_reason;
}
@@ -373,8 +375,7 @@ sync_nodes:
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
	    (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
@@ -431,7 +432,7 @@ static bool __found_offset(struct address_space *mapping,
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
@@ -513,10 +514,7 @@ fail:
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
	loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));

	switch (whence) {
	case SEEK_SET:
@@ -1052,6 +1050,13 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			return err;
		}

		/*
		 * wait for inflight dio, blocks should be removed after
		 * IO completion.
		 */
		if (attr->ia_size < old_size)
			inode_dio_wait(inode);

		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

@@ -1888,6 +1893,12 @@ static long f2fs_fallocate(struct file *file, int mode,
	if (ret)
		goto out;

	/*
	 * wait for inflight dio, blocks should be removed after IO
	 * completion.
	 */
	inode_dio_wait(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;
@@ -2710,7 +2721,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
		if ((pgoff_t)ei.fofs + ei.len >= pg_end)
			goto out;
	}

@@ -4597,6 +4608,10 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
		f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos,
					iov_iter_count(to), READ);

	/* In LFS mode, if there is inflight dio, wait for its completion */
	if (f2fs_lfs_mode(F2FS_I_SB(inode)))
		inode_dio_wait(inode);

	if (f2fs_should_use_dio(inode, iocb, to)) {
		ret = f2fs_dio_read_iter(iocb, to);
	} else {
@@ -262,33 +262,32 @@ out:
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	struct dnode_of_data dn;
	int err;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true, true);
	memcpy_from_page(inline_data_addr(inode, dn.inode_page),
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy_from_page(inline_data_addr(inode, ipage),
			 page, 0, MAX_INLINE_DATA(inode));
	set_page_dirty(dn.inode_page);
	set_page_dirty(ipage);

	f2fs_clear_page_cache_dirty_tag(page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_page_private_inline(dn.inode_page);
	f2fs_put_dnode(&dn);
	clear_page_private_inline(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

@@ -7,7 +7,6 @@
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include <linux/lz4.h>
@@ -20,7 +20,7 @@
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
@@ -1674,7 +1674,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
@@ -2686,17 +2686,40 @@ static int get_new_segment(struct f2fs_sb_info *sbi,
		goto got_it;
	}

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * If we format f2fs on zoned storage, let's try to get pinned sections
	 * from beginning of the storage, which should be a conventional one.
	 */
	if (f2fs_sb_has_blkzoned(sbi)) {
		segno = pinning ? 0 : max(first_zoned_segno(sbi), *newseg);
		/* Prioritize writing to conventional zones */
		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_PRIOR_CONV || pinning)
			segno = 0;
		else
			segno = max(first_zoned_segno(sbi), *newseg);
		hint = GET_SEC_FROM_SEG(sbi, segno);
	}
#endif

find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);

#ifdef CONFIG_BLK_DEV_ZONED
	if (secno >= MAIN_SECS(sbi) && f2fs_sb_has_blkzoned(sbi)) {
		/* Write only to sequential zones */
		if (sbi->blkzone_alloc_policy == BLKZONE_ALLOC_ONLY_SEQ) {
			hint = GET_SEC_FROM_SEG(sbi, first_zoned_segno(sbi));
			secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
		} else
			secno = find_first_zero_bit(free_i->free_secmap,
						    MAIN_SECS(sbi));
		if (secno >= MAIN_SECS(sbi)) {
			ret = -ENOSPC;
			goto out_unlock;
		}
	}
#endif

	if (secno >= MAIN_SECS(sbi)) {
		secno = find_first_zero_bit(free_i->free_secmap,
					    MAIN_SECS(sbi));
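Because the get_new_segment() hunk above interleaves removed and added lines, here is a hedged, standalone sketch of how the policy is meant to choose where the free-section search starts (this is not the kernel function; pick_start_segno, first_seq_segno, and requested_segno are made-up names, and the ONLY_SEQ case additionally restricts the fallback search, which is not shown here):

```c
#include <stdbool.h>
#include <stdio.h>

enum blkzone_allocation_policy {
	BLKZONE_ALLOC_PRIOR_SEQ,	/* prioritize sequential zones */
	BLKZONE_ALLOC_ONLY_SEQ,		/* only allow sequential zones */
	BLKZONE_ALLOC_PRIOR_CONV,	/* prioritize conventional zones */
};

/* Decide which segment the free-section search should start from. */
static unsigned int pick_start_segno(enum blkzone_allocation_policy policy,
				     bool pinning, unsigned int first_seq_segno,
				     unsigned int requested_segno)
{
	/* Pinned files and the "prefer conventional" policy start from segment 0,
	 * i.e. the conventional zones at the front of the device. */
	if (pinning || policy == BLKZONE_ALLOC_PRIOR_CONV)
		return 0;

	/* Otherwise start from the sequential-zone area, or from the caller's
	 * hint if that already lies beyond it. */
	return requested_segno > first_seq_segno ? requested_segno : first_seq_segno;
}

int main(void)
{
	printf("%u\n", pick_start_segno(BLKZONE_ALLOC_PRIOR_CONV, false, 4096, 100));
	printf("%u\n", pick_start_segno(BLKZONE_ALLOC_PRIOR_SEQ, false, 4096, 100));
	return 0;
}
```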
@@ -11,7 +11,6 @@
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
@@ -3323,24 +3322,42 @@ loff_t max_file_blocks(struct inode *inode)
	return result;
}

static int __f2fs_commit_super(struct buffer_head *bh,
				struct f2fs_super_block *super)
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio,
				pgoff_t index, bool update)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	struct bio *bio;
	/* it's rare case, we can do fua all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
	int ret;

	folio_lock(folio);
	folio_wait_writeback(folio);
	if (update)
		memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi),
					sizeof(struct f2fs_super_block));
	folio_mark_dirty(folio);
	folio_clear_dirty_for_io(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);

	bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS);

	/* it doesn't need to set crypto context for superblock update */
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(folio_index(folio));

	if (!bio_add_folio(bio, folio, folio_size(folio), 0))
		f2fs_bug_on(sbi, 1);

	ret = submit_bio_wait(bio);
	folio_end_writeback(folio);

	return ret;
}

static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
					struct folio *folio, pgoff_t index)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
		(bh->b_data + F2FS_SUPER_OFFSET);
	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
@@ -3356,9 +3373,9 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
				((u64)segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);
				((u64)segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
@@ -3415,7 +3432,7 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			err = __f2fs_commit_super(sbi, folio, index, false);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
@@ -3428,12 +3445,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
}

static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
				struct folio *folio, pgoff_t index)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
		(bh->b_data + F2FS_SUPER_OFFSET);
	struct f2fs_super_block *raw_super = F2FS_SUPER_BLOCK(folio, index);
	size_t crc_offset = 0;
	__u32 crc = 0;

@@ -3591,7 +3607,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
	if (sanity_check_area_boundary(sbi, folio, index))
		return -EFSCORRUPTED;

	return 0;
@@ -3938,7 +3954,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct folio *folio;
	struct f2fs_super_block *super;
	int err = 0;

@@ -3947,32 +3963,32 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
		folio = read_mapping_folio(sb->s_bdev->bd_mapping, block, NULL);
		if (IS_ERR(folio)) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			err = PTR_ERR(folio);
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		err = sanity_check_raw_super(sbi, folio, block);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			folio_put(folio);
			*recovery = 1;
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
			memcpy(super, F2FS_SUPER_BLOCK(folio, block),
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
		folio_put(folio);
	}

	/* No valid superblock */
@@ -3986,7 +4002,8 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,

int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	struct folio *folio;
	pgoff_t index;
	__u32 crc = 0;
	int err;

@@ -4004,22 +4021,24 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	index = sbi->valid_super_block ? 0 : 1;
	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	err = __f2fs_commit_super(sbi, folio, index, true);
	folio_put(folio);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	index = sbi->valid_super_block;
	folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	err = __f2fs_commit_super(sbi, folio, index, true);
	folio_put(folio);
	return err;
}

@@ -4219,6 +4238,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
	sbi->aligned_blksize = true;
#ifdef CONFIG_BLK_DEV_ZONED
	sbi->max_open_zones = UINT_MAX;
	sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ;
#endif

	for (i = 0; i < max_devices; i++) {
@@ -170,6 +170,12 @@ static ssize_t undiscard_blks_show(struct f2fs_attr *a,
				SM_I(sbi)->dcc_info->undiscard_blks);
}

static ssize_t atgc_enabled_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	return sysfs_emit(buf, "%d\n", sbi->am.atgc_enabled ? 1 : 0);
}

static ssize_t gc_mode_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
@@ -627,6 +633,15 @@ out:
	}
#endif

#ifdef CONFIG_BLK_DEV_ZONED
	if (!strcmp(a->attr.name, "blkzone_alloc_policy")) {
		if (t < BLKZONE_ALLOC_PRIOR_SEQ || t > BLKZONE_ALLOC_PRIOR_CONV)
			return -EINVAL;
		sbi->blkzone_alloc_policy = t;
		return count;
	}
#endif

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!strcmp(a->attr.name, "compr_written_block") ||
		!strcmp(a->attr.name, "compr_saved_block")) {
@@ -1033,6 +1048,7 @@ F2FS_SBI_GENERAL_RW_ATTR(warm_data_age_threshold);
F2FS_SBI_GENERAL_RW_ATTR(last_age_weight);
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
#endif

/* STAT_INFO ATTR */
@@ -1072,6 +1088,7 @@ F2FS_GENERAL_RO_ATTR(encoding);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
F2FS_GENERAL_RO_ATTR(pending_discard);
F2FS_GENERAL_RO_ATTR(atgc_enabled);
F2FS_GENERAL_RO_ATTR(gc_mode);
#ifdef CONFIG_F2FS_STAT_FS
F2FS_GENERAL_RO_ATTR(moved_blocks_background);
@@ -1187,6 +1204,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
#ifdef CONFIG_BLK_DEV_ZONED
	ATTR_LIST(unusable_blocks_per_sec),
	ATTR_LIST(blkzone_alloc_policy),
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	ATTR_LIST(compr_written_block),
@@ -1200,6 +1218,7 @@ static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(atgc_candidate_count),
	ATTR_LIST(atgc_age_weight),
	ATTR_LIST(atgc_age_threshold),
	ATTR_LIST(atgc_enabled),
	ATTR_LIST(seq_file_ra_mul),
	ATTR_LIST(gc_segment_mode),
	ATTR_LIST(gc_reclaimed_segments),
@@ -74,7 +74,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *aops = mapping->a_ops;

	if (pos + count > inode->i_sb->s_maxbytes)
	if (pos + count > F2FS_BLK_TO_BYTES(max_file_blocks(inode)))
		return -EFBIG;

	while (count) {
@@ -237,7 +237,8 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
	pos = le64_to_cpu(dloc.pos);

	/* Get the descriptor */
	if (pos + size < pos || pos + size > inode->i_sb->s_maxbytes ||
	if (pos + size < pos ||
	    pos + size > F2FS_BLK_TO_BYTES(max_file_blocks(inode)) ||
	    pos < f2fs_verity_metadata_pos(inode) || size > INT_MAX) {
		f2fs_warn(F2FS_I_SB(inode), "invalid verity xattr");
		f2fs_handle_error(F2FS_I_SB(inode),
@@ -629,6 +629,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
			const char *name, const void *value, size_t size,
			struct page *ipage, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_xattr_entry *here, *last;
	void *base_addr, *last_base_addr;
	int found, newsize;
@@ -772,9 +773,18 @@ retry:
	if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
			!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
		f2fs_set_encrypted_inode(inode);
	if (S_ISDIR(inode->i_mode))
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);

	if (!S_ISDIR(inode->i_mode))
		goto same;
	/*
	 * In restrict mode, fsync() always try to trigger checkpoint for all
	 * metadata consistency, in other mode, it triggers checkpoint when
	 * parent's xattr metadata was updated.
	 */
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		set_sbi_flag(sbi, SBI_NEED_CP);
	else
		f2fs_add_ino_entry(sbi, inode->i_ino, XATTR_DIR_INO);
same:
	if (is_inode_flag_set(inode, FI_ACL_MODE)) {
		inode->i_mode = F2FS_I(inode)->i_acl_mode;
@@ -139,7 +139,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
		{ CP_NODE_NEED_CP,	"node needs cp" },		\
		{ CP_FASTBOOT_MODE,	"fastboot mode" },		\
		{ CP_SPEC_LOG_NUM,	"log type is 2" },		\
		{ CP_RECOVER_DIR,	"dir needs recovery" })
		{ CP_RECOVER_DIR,	"dir needs recovery" },		\
		{ CP_XATTR_DIR,		"dir's xattr updated" })

#define show_shutdown_mode(type) \
	__print_symbolic(type, \