// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs extent cache support
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Samsung Electronics
 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
 *          Chao Yu <chao2.yu@samsung.com>
 *
 * block_age-based extent cache added by:
 * Copyright (c) 2022 xiaomi Co., Ltd.
 *             http://www.xiaomi.com/
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

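/*
 * Validate the largest cached read extent of an inode against on-disk
 * block addresses: drop it when the checkpoint is corrupted, and return
 * false (flagging the fs for fsck) when the extent range is invalid.
 */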
bool sanity_check_extent_cache(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct extent_tree *et = fi->extent_tree[EX_READ];
	struct extent_info *ei;

	if (!et)
		return true;

	ei = &et->largest;
	if (!ei->len)
		return true;

	/* Let's drop, if checkpoint got corrupted. */
	if (is_set_ckpt_flags(sbi, CP_ERROR_FLAG)) {
		ei->len = 0;
		et->largest_updated = true;
		return true;
	}

	if (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE) ||
	    !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
					DATA_GENERIC_ENHANCE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
			  __func__, inode->i_ino,
			  ei->blk, ei->fofs, ei->len);
		return false;
	}
	return true;
}

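/*
 * Fill in @ei for the given extent type: only EX_READ extents carry a
 * block address (and a compression length), and only EX_BLOCK_AGE
 * extents carry age information.
 */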
static void __set_extent_info(struct extent_info *ei,
				unsigned int fofs, unsigned int len,
				block_t blk, bool keep_clen,
				unsigned long age, unsigned long last_blocks,
				enum extent_type type)
{
	ei->fofs = fofs;
	ei->len = len;

	if (type == EX_READ) {
		ei->blk = blk;
		if (keep_clen)
			return;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		ei->c_len = 0;
#endif
	} else if (type == EX_BLOCK_AGE) {
		ei->age = age;
		ei->last_blocks = last_blocks;
	}
}

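/*
 * A read extent tree is kept only for regular files, and not for
 * compressed files unless the filesystem is read-only.
 */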
static bool __may_read_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return false;
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return false;
	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
			!f2fs_sb_has_readonly(sbi))
		return false;
	return S_ISREG(inode->i_mode);
}

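/*
 * A block-age extent tree is kept for regular files and directories,
 * but not for compressed or cold files.
 */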
static bool __may_age_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, AGE_EXTENT_CACHE))
		return false;
	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
		return false;
	if (file_is_cold(inode))
		return false;

	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

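/* Dispatch the per-type policy check above for creating an extent tree. */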
static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (type == EX_READ)
		return __may_read_extent_tree(inode);
	else if (type == EX_BLOCK_AGE)
		return __may_age_extent_tree(inode);
	return false;
}

static bool __may_extent_tree(struct inode *inode, enum extent_type type)
{
	/*
	 * for recovered files during mount do not create extents
	 * if shrinker is not registered.
	 */
	if (list_empty(&F2FS_I_SB(inode)->s_list))
		return false;

	return __init_may_extent_tree(inode, type);
}

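/*
 * Track the largest read extent in et->largest, so lookups that fall
 * inside it can be answered without walking the rbtree.
 */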
static void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (et->type != EX_READ)
		return;
	if (en->ei.len <= et->largest.len)
		return;

	et->largest = en->ei;
	et->largest_updated = true;
}

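/*
 * Two extents merge when they are contiguous in file offset and, for
 * EX_READ, also contiguous on disk; EX_BLOCK_AGE extents additionally
 * need age and last_blocks values within SAME_AGE_REGION of each other.
 * Example (hypothetical values): a read extent {fofs = 0, blk = 100,
 * len = 8} is mergeable with {fofs = 8, blk = 108, len = 4} behind it,
 * since both the file offsets and block addresses are contiguous.
 */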
static bool __is_extent_mergeable(struct extent_info *back,
		struct extent_info *front, enum extent_type type)
{
	if (type == EX_READ) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (back->c_len && back->len != back->c_len)
			return false;
		if (front->c_len && front->len != front->c_len)
			return false;
#endif
		return (back->fofs + back->len == front->fofs &&
				back->blk + back->len == front->blk);
	} else if (type == EX_BLOCK_AGE) {
		return (back->fofs + back->len == front->fofs &&
			abs(back->age - front->age) <= SAME_AGE_REGION &&
			abs(back->last_blocks - front->last_blocks) <=
						SAME_AGE_REGION);
	}
	return false;
}

static bool __is_back_mergeable(struct extent_info *cur,
		struct extent_info *back, enum extent_type type)
{
	return __is_extent_mergeable(back, cur, type);
}

static bool __is_front_mergeable(struct extent_info *cur,
		struct extent_info *front, enum extent_type type)
{
	return __is_extent_mergeable(cur, front, type);
}

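/*
 * Find the extent node covering @fofs, trying the cached entry before
 * walking the rbtree; returns NULL on a miss.
 */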
static struct extent_node *__lookup_extent_node(struct rb_root_cached *root,
			struct extent_node *cached_en, unsigned int fofs)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct extent_node *en;

	/* check a cached entry */
	if (cached_en && cached_en->ei.fofs <= fofs &&
			cached_en->ei.fofs + cached_en->ei.len > fofs)
		return cached_en;

	/* check rb_tree */
	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs)
			node = node->rb_left;
		else if (fofs >= en->ei.fofs + en->ei.len)
			node = node->rb_right;
		else
			return en;
	}
	return NULL;
}

/*
 * Look up the rb entry covering @fofs in the rb-tree; if hit, return the
 * entry, otherwise return NULL.
 * @prev_entry: extent before fofs
 * @next_entry: extent after fofs
 * @insert_p: insert point for a new extent at fofs,
 * in order to simplify the insertion afterwards.
 * The tree must stay unchanged between lookup and insertion.
 */
static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root,
				struct extent_node *cached_en,
				unsigned int fofs,
				struct extent_node **prev_entry,
				struct extent_node **next_entry,
				struct rb_node ***insert_p,
				struct rb_node **insert_parent,
				bool *leftmost)
{
	struct rb_node **pnode = &root->rb_root.rb_node;
	struct rb_node *parent = NULL, *tmp_node;
	struct extent_node *en = cached_en;

	*insert_p = NULL;
	*insert_parent = NULL;
	*prev_entry = NULL;
	*next_entry = NULL;

	if (RB_EMPTY_ROOT(&root->rb_root))
		return NULL;

	if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs)
		goto lookup_neighbors;

	*leftmost = true;

	while (*pnode) {
		parent = *pnode;
		en = rb_entry(*pnode, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			pnode = &(*pnode)->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			pnode = &(*pnode)->rb_right;
			*leftmost = false;
		} else {
			goto lookup_neighbors;
		}
	}

	*insert_p = pnode;
	*insert_parent = parent;

	en = rb_entry(parent, struct extent_node, rb_node);
	tmp_node = parent;
	if (parent && fofs > en->ei.fofs)
		tmp_node = rb_next(parent);
	*next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);

	tmp_node = parent;
	if (parent && fofs < en->ei.fofs)
		tmp_node = rb_prev(parent);
	*prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
	return NULL;

lookup_neighbors:
	if (fofs == en->ei.fofs) {
		/* lookup prev node for merging backward later */
		tmp_node = rb_prev(&en->rb_node);
		*prev_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	if (fofs == en->ei.fofs + en->ei.len - 1) {
		/* lookup next node for merging frontward later */
		tmp_node = rb_next(&en->rb_node);
		*next_entry = rb_entry_safe(tmp_node,
					struct extent_node, rb_node);
	}
	return en;
}

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

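/*
 * Allocate an extent node for @ei and link it into the cached rbtree at
 * the slot described by @parent/@p, bumping the per-tree and global node
 * counters; returns NULL if allocation fails.
 */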
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en;

	en = f2fs_kmem_cache_alloc(extent_node_slab, GFP_ATOMIC, false, sbi);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);
	en->et = et;

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
	atomic_inc(&et->node_cnt);
	atomic_inc(&eti->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	rb_erase_cached(&en->rb_node, &et->root);
	atomic_dec(&et->node_cnt);
	atomic_dec(&eti->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
	kmem_cache_free(extent_node_slab, en);
}

/*
 * Flow to release an extent_node:
 * 1. list_del_init
 * 2. __detach_extent_node
 * 3. kmem_cache_free.
 */
static void __release_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];

	spin_lock(&eti->extent_lock);
	f2fs_bug_on(sbi, list_empty(&en->list));
	list_del_init(&en->list);
	spin_unlock(&eti->extent_lock);

	__detach_extent_node(sbi, et, en);
}

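/*
 * Return the extent tree of @type for @inode, allocating it on first use
 * or reviving it from the zombie list; the tree then lives until
 * evict_inode.
 */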
static struct extent_tree *__grab_extent_tree(struct inode *inode,
						enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	mutex_lock(&eti->extent_tree_lock);
	et = radix_tree_lookup(&eti->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab,
					GFP_NOFS, true, NULL);
		f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->type = type;
		et->root = RB_ROOT_CACHED;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		INIT_LIST_HEAD(&et->list);
		atomic_set(&et->node_cnt, 0);
		atomic_inc(&eti->total_ext_tree);
	} else {
		atomic_dec(&eti->total_zombie_tree);
		list_del_init(&et->list);
	}
	mutex_unlock(&eti->extent_tree_lock);

	/* never died until evict_inode */
	F2FS_I(inode)->extent_tree[type] = et;

	return et;
}

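/* Release every node in @et and return the number of nodes freed. */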
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = atomic_read(&et->node_cnt);

	node = rb_first_cached(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);
		__release_extent_node(sbi, et, en);
		node = next;
	}

	return count - atomic_read(&et->node_cnt);
}

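/* Invalidate the cached largest extent if it overlaps [fofs, fofs + len). */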
static void __drop_largest_extent(struct extent_tree *et,
					pgoff_t fofs, unsigned int len)
{
	if (fofs < et->largest.fofs + et->largest.len &&
			fofs + len > et->largest.fofs) {
		et->largest.len = 0;
		et->largest_updated = true;
	}
}

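/*
 * Seed the read extent tree from the on-disk i_ext field when the inode
 * is loaded, or clear i_ext when read extent caching is disallowed.
 */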
void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
	struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (!__may_extent_tree(inode, EX_READ)) {
		/* drop largest read extent */
		if (i_ext && i_ext->len) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);
			i_ext->len = 0;
			set_page_dirty(ipage);
		}
		goto out;
	}

	et = __grab_extent_tree(inode, EX_READ);

	if (!i_ext || !i_ext->len)
		goto out;

	get_read_extent_info(&ei, i_ext);

	write_lock(&et->lock);
	if (atomic_read(&et->node_cnt))
		goto unlock_out;

	en = __attach_extent_node(sbi, et, &ei, NULL,
				&et->root.rb_root.rb_node, true);
	if (en) {
		et->largest = en->ei;
		et->cached_en = en;

		spin_lock(&eti->extent_lock);
		list_add_tail(&en->list, &eti->extent_list);
		spin_unlock(&eti->extent_lock);
	}
unlock_out:
	write_unlock(&et->lock);
out:
	if (!F2FS_I(inode)->extent_tree[EX_READ])
		set_inode_flag(inode, FI_NO_EXTENT);
}

void f2fs_init_age_extent_tree(struct inode *inode)
{
	if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
		return;
	__grab_extent_tree(inode, EX_BLOCK_AGE);
}

void f2fs_init_extent_tree(struct inode *inode)
{
	/* initialize read cache */
	if (__init_may_extent_tree(inode, EX_READ))
		__grab_extent_tree(inode, EX_READ);

	/* initialize block age cache */
	if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
		__grab_extent_tree(inode, EX_BLOCK_AGE);
}

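/*
 * Look up @pgofs in the extent tree of @type under et->lock; a hit copies
 * the extent into @ei, refreshes the node's LRU position, and updates the
 * hit statistics.
 */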
static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree_info *eti = &sbi->extent_tree[type];
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en;
	bool ret = false;

	if (!et)
		return false;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);

	read_lock(&et->lock);

	if (type == EX_READ &&
			et->largest.fofs <= pgofs &&
			et->largest.fofs + et->largest.len > pgofs) {
		*ei = et->largest;
		ret = true;
		stat_inc_largest_node_hit(sbi);
		goto out;
	}

	en = __lookup_extent_node(&et->root, et->cached_en, pgofs);
	if (!en)
		goto out;

	if (en == et->cached_en)
		stat_inc_cached_node_hit(sbi, type);
	else
		stat_inc_rbtree_node_hit(sbi, type);

	*ei = en->ei;
	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	ret = true;
out:
	stat_inc_total_hit(sbi, type);
	read_unlock(&et->lock);

	if (type == EX_READ)
		trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
	return ret;
}

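/*
 * Try to extend @prev_ex and/or @next_ex with @ei instead of inserting a
 * new node; when both neighbors merge, @next_ex absorbs the whole range
 * and @prev_ex is released.
 */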
static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node *prev_ex,
				struct extent_node *next_ex)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct extent_node *en = NULL;

	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
		prev_ex->ei.len += ei->len;
		ei = &prev_ex->ei;
		en = prev_ex;
	}

	if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
		next_ex->ei.fofs = ei->fofs;
		next_ex->ei.len += ei->len;
		if (et->type == EX_READ)
			next_ex->ei.blk = ei->blk;
		if (en)
			__release_extent_node(sbi, et, prev_ex);

		en = next_ex;
	}

	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	spin_lock(&eti->extent_lock);
	if (!list_empty(&en->list)) {
		list_move_tail(&en->list, &eti->extent_list);
		et->cached_en = en;
	}
	spin_unlock(&eti->extent_lock);
	return en;
}

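/*
 * Insert @ei as a new node, reusing the insertion point found by a prior
 * lookup when available, and put the node on the tail of the global LRU
 * list.
 */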
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node **insert_p,
				struct rb_node *insert_parent,
				bool leftmost)
{
	struct extent_tree_info *eti = &sbi->extent_tree[et->type];
	struct rb_node **p = &et->root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	leftmost = true;

	/* look up extent_node in the rb tree */
	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

do_insert:
	en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
	if (!en)
		return NULL;

	__try_update_largest_extent(et, en);

	/* update in global extent list */
	spin_lock(&eti->extent_lock);
	list_add_tail(&en->list, &eti->extent_list);
	et->cached_en = en;
	spin_unlock(&eti->extent_lock);
	return en;
}

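/*
 * Punch the range [fofs, fofs + len) described by @tei out of the extent
 * tree, trimming or splitting any node that overlaps it. Example
 * (hypothetical values): punching [4, 6) out of a read extent {fofs = 0,
 * blk = 100, len = 8} trims it to {fofs = 0, blk = 100, len = 4} and
 * inserts the tail {fofs = 6, blk = 106, len = 2}, provided each
 * surviving part is at least F2FS_MIN_EXTENT_LEN blocks long.
 */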
static void __update_extent_tree_range(struct inode *inode,
			struct extent_info *tei, enum extent_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
	struct extent_node *en = NULL, *en1 = NULL;
	struct extent_node *prev_en = NULL, *next_en = NULL;
	struct extent_info ei, dei, prev;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	unsigned int fofs = tei->fofs, len = tei->len;
	unsigned int end = fofs + len;
	bool updated = false;
	bool leftmost = false;

	if (!et)
		return;

	if (type == EX_READ)
		trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
						tei->blk, 0);
	else if (type == EX_BLOCK_AGE)
		trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
						tei->age, tei->last_blocks);

	write_lock(&et->lock);

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
			write_unlock(&et->lock);
			return;
		}

		prev = et->largest;
		dei.len = 0;

		/*
		 * drop largest extent before lookup, in case it's already
		 * been shrunk from extent tree
		 */
		__drop_largest_extent(et, fofs, len);
	}

	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
	en = __lookup_extent_node_ret(&et->root,
					et->cached_en, fofs,
					&prev_en, &next_en,
					&insert_p, &insert_parent,
					&leftmost);
	if (!en)
		en = next_en;

	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
	while (en && en->ei.fofs < end) {
		unsigned int org_end;
		int parts = 0;	/* # of parts current extent split into */

		next_en = en1 = NULL;

		dei = en->ei;
		org_end = dei.fofs + dei.len;
		f2fs_bug_on(sbi, fofs >= org_end);

		if (fofs > dei.fofs && (type != EX_READ ||
				fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
			en->ei.len = fofs - en->ei.fofs;
			prev_en = en;
			parts = 1;
		}

		if (end < org_end && (type != EX_READ ||
				org_end - end >= F2FS_MIN_EXTENT_LEN)) {
			if (parts) {
				__set_extent_info(&ei,
					end, org_end - end,
					end - dei.fofs + dei.blk, false,
					dei.age, dei.last_blocks,
					type);
				en1 = __insert_extent_tree(sbi, et, &ei,
							NULL, NULL, true);
				next_en = en1;
			} else {
				__set_extent_info(&en->ei,
					end, en->ei.len - (end - dei.fofs),
					en->ei.blk + (end - dei.fofs), true,
					dei.age, dei.last_blocks,
					type);
				next_en = en;
			}
			parts++;
		}

		if (!next_en) {
			struct rb_node *node = rb_next(&en->rb_node);

next_en = rb_entry_safe(node, struct extent_node,
|
|
|
|
rb_node);
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
|
|
|
|
2015-09-22 21:07:47 +08:00
|
|
|
if (parts)
|
2018-09-10 16:18:25 +08:00
|
|
|
__try_update_largest_extent(et, en);
|
2015-09-22 21:07:47 +08:00
|
|
|
else
|
2016-01-26 20:56:25 +08:00
|
|
|
__release_extent_node(sbi, et, en);
|
2015-08-26 20:34:48 +08:00
|
|
|
|
|
|
|
/*
|
2015-09-17 18:42:06 +08:00
|
|
|
* if original extent is split into zero or two parts, extent
|
|
|
|
* tree has been altered by deletion or insertion, therefore
|
|
|
|
* invalidate pointers regard to tree.
|
2015-08-26 20:34:48 +08:00
|
|
|
*/
|
2015-09-17 18:42:06 +08:00
|
|
|
if (parts != 1) {
|
|
|
|
insert_p = NULL;
|
|
|
|
insert_parent = NULL;
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
2015-09-17 18:42:06 +08:00
|
|
|
en = next_en;
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
|
|
|
|
2022-12-02 09:37:15 +08:00
|
|
|
if (type == EX_BLOCK_AGE)
|
|
|
|
goto update_age_extent_cache;
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
/* 3. update extent in read extent cache */
|
|
|
|
BUG_ON(type != EX_READ);
|
|
|
|
|
|
|
|
if (tei->blk) {
|
2022-12-02 09:37:15 +08:00
|
|
|
__set_extent_info(&ei, fofs, len, tei->blk, false,
|
|
|
|
0, 0, EX_READ);
|
2018-09-10 16:18:25 +08:00
|
|
|
if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
|
|
|
|
__insert_extent_tree(sbi, et, &ei,
|
2018-10-04 11:18:30 +08:00
|
|
|
insert_p, insert_parent, leftmost);
|
2015-07-08 17:59:36 +08:00
|
|
|
|
|
|
|
/* give up extent_cache, if split and small updates happen */
|
|
|
|
if (dei.len >= 1 &&
|
|
|
|
prev.len < F2FS_MIN_EXTENT_LEN &&
|
|
|
|
et->largest.len < F2FS_MIN_EXTENT_LEN) {
|
2018-09-10 16:18:25 +08:00
|
|
|
et->largest.len = 0;
|
|
|
|
et->largest_updated = true;
|
2016-05-21 01:13:22 +08:00
|
|
|
set_inode_flag(inode, FI_NO_EXTENT);
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
2015-08-26 20:34:48 +08:00
|
|
|
}
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2016-05-21 01:13:22 +08:00
|
|
|
if (is_inode_flag_set(inode, FI_NO_EXTENT))
|
2016-01-26 20:56:26 +08:00
|
|
|
__free_extent_tree(sbi, et);
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2018-09-10 16:18:25 +08:00
|
|
|
if (et->largest_updated) {
|
|
|
|
et->largest_updated = false;
|
|
|
|
updated = true;
|
|
|
|
}
|
2022-12-02 09:37:15 +08:00
|
|
|
goto out_read_extent_cache;
|
|
|
|
update_age_extent_cache:
|
|
|
|
if (!tei->last_blocks)
|
|
|
|
goto out_read_extent_cache;
|
2018-09-10 16:18:25 +08:00
|
|
|
|
2022-12-02 09:37:15 +08:00
|
|
|
__set_extent_info(&ei, fofs, len, 0, false,
|
|
|
|
tei->age, tei->last_blocks, EX_BLOCK_AGE);
|
|
|
|
if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
|
|
|
|
__insert_extent_tree(sbi, et, &ei,
|
|
|
|
insert_p, insert_parent, leftmost);
|
|
|
|
out_read_extent_cache:
|
2015-07-08 17:59:36 +08:00
|
|
|
write_unlock(&et->lock);
|
2018-09-10 16:18:25 +08:00
|
|
|
|
|
|
|
if (updated)
|
|
|
|
f2fs_mark_inode_dirty_sync(inode, true);
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
|
|
|
|
2021-08-04 10:23:48 +08:00
|
|
|
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
2022-12-01 01:26:29 +08:00
|
|
|
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
|
2021-08-04 10:23:48 +08:00
|
|
|
pgoff_t fofs, block_t blkaddr, unsigned int llen,
|
|
|
|
unsigned int c_len)
|
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
2022-12-01 01:26:29 +08:00
|
|
|
struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
|
2021-08-04 10:23:48 +08:00
|
|
|
struct extent_node *en = NULL;
|
|
|
|
struct extent_node *prev_en = NULL, *next_en = NULL;
|
|
|
|
struct extent_info ei;
|
|
|
|
struct rb_node **insert_p = NULL, *insert_parent = NULL;
|
|
|
|
bool leftmost = false;
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
|
|
|
|
blkaddr, c_len);
|
2021-08-04 10:23:48 +08:00
|
|
|
|
|
|
|
/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
|
|
|
|
if (is_inode_flag_set(inode, FI_NO_EXTENT))
|
|
|
|
return;
|
|
|
|
|
|
|
|
write_lock(&et->lock);
|
|
|
|
|
2023-03-11 03:49:57 +08:00
|
|
|
en = __lookup_extent_node_ret(&et->root,
|
|
|
|
et->cached_en, fofs,
|
|
|
|
&prev_en, &next_en,
|
|
|
|
&insert_p, &insert_parent,
|
|
|
|
&leftmost);
|
2021-08-04 10:23:48 +08:00
|
|
|
if (en)
|
|
|
|
goto unlock_out;
|
|
|
|
|
2022-12-02 09:37:15 +08:00
|
|
|
__set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
|
2021-08-04 10:23:48 +08:00
|
|
|
ei.c_len = c_len;
|
|
|
|
|
|
|
|
if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
|
|
|
|
__insert_extent_tree(sbi, et, &ei,
|
|
|
|
insert_p, insert_parent, leftmost);
|
|
|
|
unlock_out:
|
|
|
|
write_unlock(&et->lock);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2023-02-04 17:43:45 +08:00
|
|
|
static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
|
|
|
|
unsigned long long new,
|
2022-12-02 09:37:15 +08:00
|
|
|
unsigned long long old)
|
|
|
|
{
|
2023-02-02 16:20:27 +08:00
|
|
|
unsigned int rem_old, rem_new;
|
|
|
|
unsigned long long res;
|
2023-02-04 17:43:45 +08:00
|
|
|
unsigned int weight = sbi->last_age_weight;
|
2022-12-02 09:37:15 +08:00
|
|
|
|
2023-02-04 17:43:45 +08:00
|
|
|
res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
|
|
|
|
+ div_u64_rem(old, 100, &rem_old) * weight;
|
2022-12-02 09:37:15 +08:00
|
|
|
|
2023-02-02 16:20:27 +08:00
|
|
|
if (rem_new)
|
2023-02-04 17:43:45 +08:00
|
|
|
res += rem_new * (100 - weight) / 100;
|
2023-02-02 16:20:27 +08:00
|
|
|
if (rem_old)
|
2023-02-04 17:43:45 +08:00
|
|
|
res += rem_old * weight / 100;
|
2023-02-02 16:20:27 +08:00
|
|
|
|
|
|
|
return res;
|
2022-12-02 09:37:15 +08:00
|
|
|
}
|
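The helper above computes a weighted average, age = (new * (100 - weight) + old * weight) / 100, splitting each operand into quotient and remainder mod 100 so the intermediate products cannot overflow 64 bits. A minimal userspace sketch of the same arithmetic (plain / and % standing in for the kernel's div_u64_rem()):
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as __calculate_block_age: a weighted average of the
 * new and old ages that avoids overflowing the 64-bit intermediate. */
static uint64_t calc_block_age(uint64_t new_age, uint64_t old_age,
			       unsigned int weight)
{
	uint64_t res = (new_age / 100) * (100 - weight)
			+ (old_age / 100) * weight;

	res += (new_age % 100) * (100 - weight) / 100;
	res += (old_age % 100) * weight / 100;
	return res;
}

int main(void)
{
	/* weight 30 keeps 30% of the old age: 0.7 * 1000 + 0.3 * 2000 = 1300 */
	printf("%llu\n", (unsigned long long)calc_block_age(1000, 2000, 30));
	return 0;
}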
|
|
|
|
|
|
|
/* This returns a new age and allocated blocks in ei */
|
2022-12-17 06:41:54 +08:00
|
|
|
static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
|
|
|
|
block_t blkaddr)
|
2022-12-02 09:37:15 +08:00
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
|
|
|
loff_t f_size = i_size_read(inode);
|
|
|
|
unsigned long long cur_blocks =
|
|
|
|
atomic64_read(&sbi->allocated_data_blocks);
|
2022-12-17 08:36:36 +08:00
|
|
|
struct extent_info tei = *ei; /* only fofs and len are valid */
|
2022-12-02 09:37:15 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* When I/O is not aligned to a PAGE_SIZE, update will happen to the last
|
|
|
|
* file block even in seq write, so don't record an age for the newly
|
|
|
|
* written last file block here.
|
|
|
|
*/
|
|
|
|
if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
|
2022-12-17 06:41:54 +08:00
|
|
|
blkaddr == NEW_ADDR)
|
2022-12-02 09:37:15 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2022-12-17 08:36:36 +08:00
|
|
|
if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
|
2022-12-02 09:37:15 +08:00
|
|
|
unsigned long long cur_age;
|
|
|
|
|
2022-12-17 08:36:36 +08:00
|
|
|
if (cur_blocks >= tei.last_blocks)
|
|
|
|
cur_age = cur_blocks - tei.last_blocks;
|
2022-12-02 09:37:15 +08:00
|
|
|
else
|
|
|
|
/* allocated_data_blocks overflow */
|
2022-12-17 08:36:36 +08:00
|
|
|
cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;
|
2022-12-02 09:37:15 +08:00
|
|
|
|
2022-12-17 08:36:36 +08:00
|
|
|
if (tei.age)
|
2023-02-04 17:43:45 +08:00
|
|
|
ei->age = __calculate_block_age(sbi, cur_age, tei.age);
|
2022-12-02 09:37:15 +08:00
|
|
|
else
|
|
|
|
ei->age = cur_age;
|
|
|
|
ei->last_blocks = cur_blocks;
|
|
|
|
WARN_ON(ei->age > cur_blocks);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-12-17 06:41:54 +08:00
|
|
|
f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
|
2022-12-02 09:37:15 +08:00
|
|
|
|
|
|
|
/* the data block was allocated for the first time */
|
2022-12-17 06:41:54 +08:00
|
|
|
if (blkaddr == NEW_ADDR)
|
2022-12-02 09:37:15 +08:00
|
|
|
goto out;
|
|
|
|
|
2022-12-17 06:41:54 +08:00
|
|
|
if (__is_valid_data_blkaddr(blkaddr) &&
|
|
|
|
!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
|
2022-12-02 09:37:15 +08:00
|
|
|
f2fs_bug_on(sbi, 1);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
/*
|
|
|
|
* Initialize the block age with zero; this can happen when the block-age
|
|
|
|
* extent was reclaimed due to memory pressure or a system reboot.
|
|
|
|
*/
|
|
|
|
ei->age = 0;
|
|
|
|
ei->last_blocks = cur_blocks;
|
|
|
|
return 0;
|
|
|
|
}
|
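The else branch above approximates the allocation delta when the global allocated_data_blocks counter has wrapped past ULLONG_MAX since the last snapshot. A small standalone sketch of that delta computation (illustrative only, not the kernel code):
#include <stdint.h>
#include <stdio.h>

/* Distance from the recorded snapshot to the current counter value,
 * mirroring the wrap-around handling in __get_new_block_age(). */
static uint64_t alloc_delta(uint64_t cur_blocks, uint64_t last_blocks)
{
	if (cur_blocks >= last_blocks)
		return cur_blocks - last_blocks;
	/* counter overflowed and restarted from zero */
	return UINT64_MAX - last_blocks + cur_blocks;
}

int main(void)
{
	/* snapshot taken 5 blocks before the wrap, 10 allocated after it */
	printf("%llu\n",
	       (unsigned long long)alloc_delta(10, UINT64_MAX - 5)); /* 15 */
	return 0;
}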
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
|
2015-07-08 17:59:36 +08:00
|
|
|
{
|
2022-12-17 06:05:44 +08:00
|
|
|
struct extent_info ei = {};
|
2022-12-01 01:26:29 +08:00
|
|
|
|
|
|
|
if (!__may_extent_tree(dn->inode, type))
|
|
|
|
return;
|
|
|
|
|
|
|
|
ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
|
|
|
|
dn->ofs_in_node;
|
|
|
|
ei.len = 1;
|
|
|
|
|
|
|
|
if (type == EX_READ) {
|
|
|
|
if (dn->data_blkaddr == NEW_ADDR)
|
|
|
|
ei.blk = NULL_ADDR;
|
|
|
|
else
|
|
|
|
ei.blk = dn->data_blkaddr;
|
2022-12-02 09:37:15 +08:00
|
|
|
} else if (type == EX_BLOCK_AGE) {
|
2022-12-17 06:41:54 +08:00
|
|
|
if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
|
2022-12-02 09:37:15 +08:00
|
|
|
return;
|
2022-12-01 01:26:29 +08:00
|
|
|
}
|
|
|
|
__update_extent_tree_range(dn->inode, &ei, type);
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
|
|
|
|
enum extent_type type)
|
|
|
|
{
|
|
|
|
struct extent_tree_info *eti = &sbi->extent_tree[type];
|
2016-01-01 07:02:16 +08:00
|
|
|
struct extent_tree *et, *next;
|
2016-01-26 20:56:26 +08:00
|
|
|
struct extent_node *en;
|
2015-07-08 17:59:36 +08:00
|
|
|
unsigned int node_cnt = 0, tree_cnt = 0;
|
|
|
|
int remained;
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
if (!atomic_read(&eti->total_zombie_tree))
|
2015-12-22 11:25:50 +08:00
|
|
|
goto free_node;
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
if (!mutex_trylock(&eti->extent_tree_lock))
|
2015-07-08 17:59:36 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* 1. remove unreferenced extent tree */
|
2022-12-01 01:26:29 +08:00
|
|
|
list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
|
2016-01-08 20:24:00 +08:00
|
|
|
if (atomic_read(&et->node_cnt)) {
|
|
|
|
write_lock(&et->lock);
|
2016-01-26 20:56:26 +08:00
|
|
|
node_cnt += __free_extent_tree(sbi, et);
|
2016-01-08 20:24:00 +08:00
|
|
|
write_unlock(&et->lock);
|
|
|
|
}
|
2016-01-26 20:56:26 +08:00
|
|
|
f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
|
2016-01-01 07:02:16 +08:00
|
|
|
list_del_init(&et->list);
|
2022-12-01 01:26:29 +08:00
|
|
|
radix_tree_delete(&eti->extent_tree_root, et->ino);
|
2016-01-01 07:02:16 +08:00
|
|
|
kmem_cache_free(extent_tree_slab, et);
|
2022-12-01 01:26:29 +08:00
|
|
|
atomic_dec(&eti->total_ext_tree);
|
|
|
|
atomic_dec(&eti->total_zombie_tree);
|
2016-01-01 07:02:16 +08:00
|
|
|
tree_cnt++;
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2016-01-01 07:02:16 +08:00
|
|
|
if (node_cnt + tree_cnt >= nr_shrink)
|
|
|
|
goto unlock_out;
|
2016-01-20 07:31:48 +08:00
|
|
|
cond_resched();
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
2022-12-01 01:26:29 +08:00
|
|
|
mutex_unlock(&eti->extent_tree_lock);
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2015-12-22 11:25:50 +08:00
|
|
|
free_node:
|
2015-07-08 17:59:36 +08:00
|
|
|
/* 2. remove LRU extent entries */
|
2022-12-01 01:26:29 +08:00
|
|
|
if (!mutex_trylock(&eti->extent_tree_lock))
|
2015-07-08 17:59:36 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
remained = nr_shrink - (node_cnt + tree_cnt);
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
spin_lock(&eti->extent_lock);
|
2016-01-26 20:56:26 +08:00
|
|
|
for (; remained > 0; remained--) {
|
2022-12-01 01:26:29 +08:00
|
|
|
if (list_empty(&eti->extent_list))
|
2015-07-08 17:59:36 +08:00
|
|
|
break;
|
2022-12-01 01:26:29 +08:00
|
|
|
en = list_first_entry(&eti->extent_list,
|
2016-01-26 20:56:26 +08:00
|
|
|
struct extent_node, list);
|
|
|
|
et = en->et;
|
|
|
|
if (!write_trylock(&et->lock)) {
|
|
|
|
/* refresh this extent node's position in extent list */
|
2022-12-01 01:26:29 +08:00
|
|
|
list_move_tail(&en->list, &eti->extent_list);
|
2016-01-26 20:56:26 +08:00
|
|
|
continue;
|
|
|
|
}
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2016-01-26 20:56:26 +08:00
|
|
|
list_del_init(&en->list);
|
2022-12-01 01:26:29 +08:00
|
|
|
spin_unlock(&eti->extent_lock);
|
2016-01-08 20:24:00 +08:00
|
|
|
|
2016-01-26 20:56:26 +08:00
|
|
|
__detach_extent_node(sbi, et, en);
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2016-01-26 20:56:26 +08:00
|
|
|
write_unlock(&et->lock);
|
|
|
|
node_cnt++;
|
2022-12-01 01:26:29 +08:00
|
|
|
spin_lock(&eti->extent_lock);
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
2022-12-01 01:26:29 +08:00
|
|
|
spin_unlock(&eti->extent_lock);
|
2016-01-26 20:56:26 +08:00
|
|
|
|
2015-07-08 17:59:36 +08:00
|
|
|
unlock_out:
|
2022-12-01 01:26:29 +08:00
|
|
|
mutex_unlock(&eti->extent_tree_lock);
|
2015-07-08 17:59:36 +08:00
|
|
|
out:
|
2022-12-01 01:26:29 +08:00
|
|
|
trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
|
2015-07-08 17:59:36 +08:00
|
|
|
|
|
|
|
return node_cnt + tree_cnt;
|
|
|
|
}
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
/* read extent cache operations */
|
|
|
|
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
|
|
|
|
struct extent_info *ei)
|
|
|
|
{
|
|
|
|
if (!__may_extent_tree(inode, EX_READ))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
|
|
|
|
}
|
|
|
|
|
2022-11-28 17:15:14 +08:00
|
|
|
bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
|
|
|
|
block_t *blkaddr)
|
|
|
|
{
|
|
|
|
struct extent_info ei = {};
|
|
|
|
|
|
|
|
if (!f2fs_lookup_read_extent_cache(inode, index, &ei))
|
|
|
|
return false;
|
|
|
|
*blkaddr = ei.blk + index - ei.fofs;
|
|
|
|
return true;
|
|
|
|
}
|
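The lookup above resolves a page index inside a cached extent with the simple offset arithmetic blk + index - fofs. A worked example with made-up values:
#include <assert.h>

int main(void)
{
	/* hypothetical cached extent: file pages [100, 108) -> blocks [5000, 5008) */
	unsigned int fofs = 100, blk = 5000, len = 8;
	unsigned int index = 103;	/* page index being looked up */

	assert(index >= fofs && index < fofs + len);
	assert(blk + index - fofs == 5003);	/* resolved block address */
	return 0;
}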
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
|
|
|
|
{
|
|
|
|
return __update_extent_cache(dn, EX_READ);
|
|
|
|
}
|
|
|
|
|
|
|
|
void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
|
|
|
|
pgoff_t fofs, block_t blkaddr, unsigned int len)
|
|
|
|
{
|
|
|
|
struct extent_info ei = {
|
|
|
|
.fofs = fofs,
|
|
|
|
.len = len,
|
|
|
|
.blk = blkaddr,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!__may_extent_tree(dn->inode, EX_READ))
|
|
|
|
return;
|
|
|
|
|
|
|
|
__update_extent_tree_range(dn->inode, &ei, EX_READ);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
|
|
|
|
{
|
|
|
|
if (!test_opt(sbi, READ_EXTENT_CACHE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
|
|
|
|
}
|
|
|
|
|
2022-12-02 09:37:15 +08:00
|
|
|
/* block age extent cache operations */
|
|
|
|
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
|
|
|
|
struct extent_info *ei)
|
|
|
|
{
|
|
|
|
if (!__may_extent_tree(inode, EX_BLOCK_AGE))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
|
|
|
|
}
|
|
|
|
|
|
|
|
void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
|
|
|
|
{
|
|
|
|
return __update_extent_cache(dn, EX_BLOCK_AGE);
|
|
|
|
}
|
|
|
|
|
|
|
|
void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
|
|
|
|
pgoff_t fofs, unsigned int len)
|
|
|
|
{
|
|
|
|
struct extent_info ei = {
|
|
|
|
.fofs = fofs,
|
|
|
|
.len = len,
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
|
|
|
|
return;
|
|
|
|
|
|
|
|
__update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
|
|
|
|
{
|
|
|
|
if (!test_opt(sbi, AGE_EXTENT_CACHE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
|
|
|
|
}
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
static unsigned int __destroy_extent_node(struct inode *inode,
|
|
|
|
enum extent_type type)
|
2015-07-08 17:59:36 +08:00
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
2022-12-01 01:26:29 +08:00
|
|
|
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
|
2015-07-08 17:59:36 +08:00
|
|
|
unsigned int node_cnt = 0;
|
|
|
|
|
2016-01-08 20:24:00 +08:00
|
|
|
if (!et || !atomic_read(&et->node_cnt))
|
2015-07-08 17:59:36 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
write_lock(&et->lock);
|
2016-01-26 20:56:26 +08:00
|
|
|
node_cnt = __free_extent_tree(sbi, et);
|
2015-07-08 17:59:36 +08:00
|
|
|
write_unlock(&et->lock);
|
|
|
|
|
|
|
|
return node_cnt;
|
|
|
|
}
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
void f2fs_destroy_extent_node(struct inode *inode)
|
|
|
|
{
|
|
|
|
__destroy_extent_node(inode, EX_READ);
|
2022-12-02 09:37:15 +08:00
|
|
|
__destroy_extent_node(inode, EX_BLOCK_AGE);
|
2022-12-01 01:26:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __drop_extent_tree(struct inode *inode, enum extent_type type)
|
2016-07-13 02:07:52 +08:00
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
2022-12-01 01:26:29 +08:00
|
|
|
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
|
2018-09-10 16:18:25 +08:00
|
|
|
bool updated = false;
|
2016-07-13 02:07:52 +08:00
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
if (!__may_extent_tree(inode, type))
|
2018-01-27 17:29:49 +08:00
|
|
|
return;
|
|
|
|
|
2016-07-13 02:07:52 +08:00
|
|
|
write_lock(&et->lock);
|
|
|
|
__free_extent_tree(sbi, et);
|
2022-12-01 01:26:29 +08:00
|
|
|
if (type == EX_READ) {
|
|
|
|
set_inode_flag(inode, FI_NO_EXTENT);
|
|
|
|
if (et->largest.len) {
|
|
|
|
et->largest.len = 0;
|
|
|
|
updated = true;
|
|
|
|
}
|
2018-09-10 16:18:25 +08:00
|
|
|
}
|
2016-07-13 02:07:52 +08:00
|
|
|
write_unlock(&et->lock);
|
2018-09-10 16:18:25 +08:00
|
|
|
if (updated)
|
|
|
|
f2fs_mark_inode_dirty_sync(inode, true);
|
2016-07-13 02:07:52 +08:00
|
|
|
}
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
void f2fs_drop_extent_tree(struct inode *inode)
|
|
|
|
{
|
|
|
|
__drop_extent_tree(inode, EX_READ);
|
2022-12-02 09:37:15 +08:00
|
|
|
__drop_extent_tree(inode, EX_BLOCK_AGE);
|
2022-12-01 01:26:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
|
2015-07-08 17:59:36 +08:00
|
|
|
{
|
|
|
|
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
2022-12-01 01:26:29 +08:00
|
|
|
struct extent_tree_info *eti = &sbi->extent_tree[type];
|
|
|
|
struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
|
2015-07-08 17:59:36 +08:00
|
|
|
unsigned int node_cnt = 0;
|
|
|
|
|
|
|
|
if (!et)
|
|
|
|
return;
|
|
|
|
|
2016-01-08 20:22:52 +08:00
|
|
|
if (inode->i_nlink && !is_bad_inode(inode) &&
|
|
|
|
atomic_read(&et->node_cnt)) {
|
2022-12-01 01:26:29 +08:00
|
|
|
mutex_lock(&eti->extent_tree_lock);
|
|
|
|
list_add_tail(&et->list, &eti->zombie_list);
|
|
|
|
atomic_inc(&eti->total_zombie_tree);
|
|
|
|
mutex_unlock(&eti->extent_tree_lock);
|
2015-07-08 17:59:36 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* free all extent info belonging to this extent tree */
|
2022-12-01 01:26:29 +08:00
|
|
|
node_cnt = __destroy_extent_node(inode, type);
|
2015-07-08 17:59:36 +08:00
|
|
|
|
|
|
|
/* delete extent tree entry in radix tree */
|
2022-12-01 01:26:29 +08:00
|
|
|
mutex_lock(&eti->extent_tree_lock);
|
2016-01-08 20:22:52 +08:00
|
|
|
f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
|
2022-12-01 01:26:29 +08:00
|
|
|
radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
|
2015-07-08 17:59:36 +08:00
|
|
|
kmem_cache_free(extent_tree_slab, et);
|
2022-12-01 01:26:29 +08:00
|
|
|
atomic_dec(&eti->total_ext_tree);
|
|
|
|
mutex_unlock(&eti->extent_tree_lock);
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
F2FS_I(inode)->extent_tree[type] = NULL;
|
2015-07-08 17:59:36 +08:00
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
void f2fs_destroy_extent_tree(struct inode *inode)
|
2015-07-08 17:59:36 +08:00
|
|
|
{
|
2022-12-01 01:26:29 +08:00
|
|
|
__destroy_extent_tree(inode, EX_READ);
|
2022-12-02 09:37:15 +08:00
|
|
|
__destroy_extent_tree(inode, EX_BLOCK_AGE);
|
2015-08-26 20:34:48 +08:00
|
|
|
}
|
|
|
|
|
2022-12-01 01:26:29 +08:00
|
|
|
static void __init_extent_tree_info(struct extent_tree_info *eti)
|
2015-08-26 20:34:48 +08:00
|
|
|
{
|
2022-12-01 01:26:29 +08:00
|
|
|
INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
|
|
|
|
mutex_init(&eti->extent_tree_lock);
|
|
|
|
INIT_LIST_HEAD(&eti->extent_list);
|
|
|
|
spin_lock_init(&eti->extent_lock);
|
|
|
|
atomic_set(&eti->total_ext_tree, 0);
|
|
|
|
INIT_LIST_HEAD(&eti->zombie_list);
|
|
|
|
atomic_set(&eti->total_zombie_tree, 0);
|
|
|
|
atomic_set(&eti->total_ext_node, 0);
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
|
|
|
|
f2fs: clean up symbol namespace
As Ted reported:
"Hi, I was looking at f2fs's sources recently, and I noticed that there
is a very large number of non-static symbols which don't have a f2fs
prefix. There's well over a hundred (see attached below).
As one example, in fs/f2fs/dir.c there is:
unsigned char get_de_type(struct f2fs_dir_entry *de)
This function is clearly only useful for f2fs, but it has a generic
name. This means that if any other file system tries to have the same
symbol name, there will be a symbol conflict and the kernel would not
successfully build. It also means that when someone is looking at f2fs
sources, it's not at all obvious whether a function such as
read_data_page(), invalidate_blocks(), is a generic kernel function
found in the fs, mm, or block layers, or a f2fs specific function.
You might want to fix this at some point. Hopefully Kent's bcachefs
isn't similarly using genericly named functions, since that might
cause conflicts with f2fs's functions --- but just as this would be a
problem that we would rightly insist that Kent fix, this is something
that we should have rightly insisted that f2fs should have fixed
before it was integrated into the mainline kernel.
acquire_orphan_inode
add_ino_entry
add_orphan_inode
allocate_data_block
allocate_new_segments
alloc_nid
alloc_nid_done
alloc_nid_failed
available_free_memory
...."
This patch adds "f2fs_" prefix for all non-static symbols in order to:
a) avoid conflict with other kernel generic symbols;
b) to indicate the function is f2fs specific one instead of generic
one;
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2018-05-30 00:20:41 +08:00
|
|
|
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
|
2015-07-08 17:59:36 +08:00
|
|
|
{
|
2022-12-01 01:26:29 +08:00
|
|
|
__init_extent_tree_info(&sbi->extent_tree[EX_READ]);
|
2022-12-02 09:37:15 +08:00
|
|
|
__init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);
|
|
|
|
|
|
|
|
/* initialize for block age extents */
|
|
|
|
atomic64_set(&sbi->allocated_data_blocks, 0);
|
|
|
|
sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
|
|
|
|
sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
|
2023-02-04 17:43:45 +08:00
|
|
|
sbi->last_age_weight = LAST_AGE_WEIGHT;
|
2015-07-08 17:59:36 +08:00
|
|
|
}
|
|
|
|
|
2018-05-30 00:20:41 +08:00
|
|
|
int __init f2fs_create_extent_cache(void)
|
2015-07-08 17:59:36 +08:00
|
|
|
{
|
|
|
|
extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
|
|
|
|
sizeof(struct extent_tree));
|
|
|
|
if (!extent_tree_slab)
|
|
|
|
return -ENOMEM;
|
|
|
|
extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
|
|
|
|
sizeof(struct extent_node));
|
|
|
|
if (!extent_node_slab) {
|
|
|
|
kmem_cache_destroy(extent_tree_slab);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-30 00:20:41 +08:00
|
|
|
void f2fs_destroy_extent_cache(void)
|
2015-07-08 17:59:36 +08:00
|
|
|
{
|
|
|
|
kmem_cache_destroy(extent_node_slab);
|
|
|
|
kmem_cache_destroy(extent_tree_slab);
|
|
|
|
}
|