f2fs: use extent_cache by default

Since the extent cache is now enabled by default, we no longer need to maintain duplicate extent information: the legacy per-inode single extent is removed in favor of the extent tree.

The integrated rules are:
 - update the on-disk extent with the largest one tracked by the in-memory extent_cache (see the sketch below)
 - destroy the extent_tree in the truncation case
 - drop per-inode extent_cache entries via the shrinker

Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit 3e72f72139
parent 7daaea256d
Author: Jaegeuk Kim <jaegeuk@kernel.org>
Date:   2015-06-19 17:53:26 -07:00

6 changed files with 142 additions and 265 deletions
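For reference while reading the diff, here is a minimal user-space sketch of the first rule (hypothetical names, not the kernel API): the cache remembers the largest extent it has seen and invalidates it when an update or truncation lands inside it. It mirrors what the update_out path added to __insert_extent_tree() and the new __drop_largest_extent() do below; update_inode() then copies extent_tree->largest into the on-disk i_ext.

/* sketch.c - illustrative only; the struct and helpers below are
 * simplified stand-ins for f2fs's extent_info / extent_tree->largest. */
#include <stdio.h>

struct extent { unsigned int fofs, blk, len; };    /* file offset, start block, length */

static struct extent largest;    /* stands in for extent_tree->largest */

/* mirrors the update_out: path added to __insert_extent_tree() */
static void track_largest(const struct extent *ei)
{
    if (ei->len > largest.len)
        largest = *ei;
}

/* mirrors __drop_largest_extent(): invalidate when fofs falls inside it */
static void drop_largest(unsigned int fofs)
{
    if (largest.fofs <= fofs && largest.fofs + largest.len > fofs)
        largest.len = 0;
}

int main(void)
{
    struct extent a = { 0, 100, 4 }, b = { 10, 200, 32 };

    track_largest(&a);
    track_largest(&b);    /* b wins: len 32 > 4 */
    printf("largest: fofs=%u len=%u\n", largest.fofs, largest.len);

    drop_largest(20);    /* an update inside [10, 42) invalidates it */
    printf("after drop: len=%u\n", largest.len);
    return 0;
}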


@@ -266,103 +266,6 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
     return err;
 }

-static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
-                    struct extent_info *ei)
-{
-    struct f2fs_inode_info *fi = F2FS_I(inode);
-    pgoff_t start_fofs, end_fofs;
-    block_t start_blkaddr;
-
-    read_lock(&fi->ext_lock);
-    if (fi->ext.len == 0) {
-        read_unlock(&fi->ext_lock);
-        return false;
-    }
-
-    stat_inc_total_hit(inode->i_sb);
-
-    start_fofs = fi->ext.fofs;
-    end_fofs = fi->ext.fofs + fi->ext.len - 1;
-    start_blkaddr = fi->ext.blk;
-
-    if (pgofs >= start_fofs && pgofs <= end_fofs) {
-        *ei = fi->ext;
-        stat_inc_read_hit(inode->i_sb);
-        read_unlock(&fi->ext_lock);
-        return true;
-    }
-    read_unlock(&fi->ext_lock);
-    return false;
-}
-
-static bool update_extent_info(struct inode *inode, pgoff_t fofs,
-                    block_t blkaddr)
-{
-    struct f2fs_inode_info *fi = F2FS_I(inode);
-    pgoff_t start_fofs, end_fofs;
-    block_t start_blkaddr, end_blkaddr;
-    int need_update = true;
-
-    write_lock(&fi->ext_lock);
-
-    start_fofs = fi->ext.fofs;
-    end_fofs = fi->ext.fofs + fi->ext.len - 1;
-    start_blkaddr = fi->ext.blk;
-    end_blkaddr = fi->ext.blk + fi->ext.len - 1;
-
-    /* Drop and initialize the matched extent */
-    if (fi->ext.len == 1 && fofs == start_fofs)
-        fi->ext.len = 0;
-
-    /* Initial extent */
-    if (fi->ext.len == 0) {
-        if (blkaddr != NULL_ADDR) {
-            fi->ext.fofs = fofs;
-            fi->ext.blk = blkaddr;
-            fi->ext.len = 1;
-        }
-        goto end_update;
-    }
-
-    /* Front merge */
-    if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
-        fi->ext.fofs--;
-        fi->ext.blk--;
-        fi->ext.len++;
-        goto end_update;
-    }
-
-    /* Back merge */
-    if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
-        fi->ext.len++;
-        goto end_update;
-    }
-
-    /* Split the existing extent */
-    if (fi->ext.len > 1 &&
-            fofs >= start_fofs && fofs <= end_fofs) {
-        if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
-            fi->ext.len = fofs - start_fofs;
-        } else {
-            fi->ext.fofs = fofs + 1;
-            fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
-            fi->ext.len -= fofs - start_fofs + 1;
-        }
-    } else {
-        need_update = false;
-    }
-
-    /* Finally, if the extent is very fragmented, let's drop the cache. */
-    if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
-        fi->ext.len = 0;
-        set_inode_flag(fi, FI_NO_EXTENT);
-        need_update = true;
-    }
-end_update:
-    write_unlock(&fi->ext_lock);
-    return need_update;
-}
-
 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
                 struct extent_tree *et, struct extent_info *ei,
                 struct rb_node *parent, struct rb_node **p)
@@ -394,23 +297,6 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
     et->cached_en = NULL;
 }

-static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
-                            nid_t ino)
-{
-    struct extent_tree *et;
-
-    down_read(&sbi->extent_tree_lock);
-    et = radix_tree_lookup(&sbi->extent_tree_root, ino);
-    if (!et) {
-        up_read(&sbi->extent_tree_lock);
-        return NULL;
-    }
-    atomic_inc(&et->refcount);
-    up_read(&sbi->extent_tree_lock);
-
-    return et;
-}
-
 static struct extent_tree *__grab_extent_tree(struct inode *inode)
 {
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -434,6 +320,9 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
     atomic_inc(&et->refcount);
     up_write(&sbi->extent_tree_lock);

+    /* never died untill evict_inode */
+    F2FS_I(inode)->extent_tree = et;
+
     return et;
 }
@@ -522,7 +411,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
                 en->ei.blk = ei->blk;
                 en->ei.len += ei->len;
                 *den = __try_back_merge(sbi, et, en);
-                return en;
+                goto update_out;
             }
             p = &(*p)->rb_left;
         } else if (ei->fofs >= en->ei.fofs + en->ei.len) {
@@ -530,7 +419,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
                 f2fs_bug_on(sbi, !den);
                 en->ei.len += ei->len;
                 *den = __try_front_merge(sbi, et, en);
-                return en;
+                goto update_out;
             }
             p = &(*p)->rb_right;
         } else {
@@ -538,7 +427,14 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
         }
     }

-    return __attach_extent_node(sbi, et, ei, parent, p);
+    en = __attach_extent_node(sbi, et, ei, parent, p);
+    if (!en)
+        return NULL;
+update_out:
+    if (en->ei.len > et->largest.len)
+        et->largest = en->ei;
+    et->cached_en = en;
+    return en;
 }

 static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
@@ -570,51 +466,56 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
     return count - et->count;
 }

-static void f2fs_init_extent_tree(struct inode *inode,
-                    struct f2fs_extent *i_ext)
+static void __drop_largest_extent(struct inode *inode, pgoff_t fofs)
+{
+    struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
+
+    if (largest->fofs <= fofs && largest->fofs + largest->len > fofs)
+        largest->len = 0;
+}
+
+void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
 {
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
     struct extent_tree *et;
     struct extent_node *en;
     struct extent_info ei;

-    if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
+    if (!f2fs_may_extent_tree(inode))
         return;

     et = __grab_extent_tree(inode);

+    if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
+        return;
+
+    set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
+        le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+
     write_lock(&et->lock);
     if (et->count)
         goto out;

-    set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
-        le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
-
     en = __insert_extent_tree(sbi, et, &ei, NULL);
     if (en) {
-        et->cached_en = en;
-
         spin_lock(&sbi->extent_lock);
         list_add_tail(&en->list, &sbi->extent_list);
         spin_unlock(&sbi->extent_lock);
     }
 out:
     write_unlock(&et->lock);
-    atomic_dec(&et->refcount);
 }

 static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
                             struct extent_info *ei)
 {
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-    struct extent_tree *et;
+    struct extent_tree *et = F2FS_I(inode)->extent_tree;
     struct extent_node *en;

-    trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+    f2fs_bug_on(sbi, !et);

-    et = __find_extent_tree(sbi, inode->i_ino);
-    if (!et)
-        return false;
+    trace_f2fs_lookup_extent_tree_start(inode, pgofs);

     read_lock(&et->lock);
     en = __lookup_extent_tree(et, pgofs);
@@ -631,27 +532,38 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
     read_unlock(&et->lock);

     trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);
-    atomic_dec(&et->refcount);
     return en ? true : false;
 }

-static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
+/* return true, if on-disk extent should be updated */
+static bool f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
                             block_t blkaddr)
 {
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-    struct extent_tree *et;
+    struct extent_tree *et = F2FS_I(inode)->extent_tree;
     struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
     struct extent_node *den = NULL;
-    struct extent_info ei, dei;
+    struct extent_info ei, dei, prev;
     unsigned int endofs;

+    if (!et)
+        return false;
+
     trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

-    et = __grab_extent_tree(inode);
-
     write_lock(&et->lock);

+    if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+        write_unlock(&et->lock);
+        return false;
+    }
+
+    prev = et->largest;
+    dei.len = 0;
+
+    /* we do not guarantee that the largest extent is cached all the time */
+    __drop_largest_extent(inode, fofs);
+
     /* 1. lookup and remove existing extent info in cache */
     en = __lookup_extent_tree(et, fofs);
     if (!en)
@@ -683,6 +595,14 @@ update_extent:
     if (blkaddr) {
         set_extent_info(&ei, fofs, blkaddr, 1);
         en3 = __insert_extent_tree(sbi, et, &ei, &den);
+
+        /* give up extent_cache, if split and small updates happen */
+        if (dei.len >= 1 &&
+                prev.len < F2FS_MIN_EXTENT_LEN &&
+                et->largest.len < F2FS_MIN_EXTENT_LEN) {
+            et->largest.len = 0;
+            set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
+        }
     }

     /* 4. update in global extent list */
@@ -714,57 +634,12 @@ update_extent:
     if (den)
         kmem_cache_free(extent_node_slab, den);

+    if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+        __free_extent_tree(sbi, et, true);
+
     write_unlock(&et->lock);
-    atomic_dec(&et->refcount);
-}

-void f2fs_preserve_extent_tree(struct inode *inode)
-{
-    struct extent_tree *et;
-    struct extent_info *ext = &F2FS_I(inode)->ext;
-    bool sync = false;
-
-    if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-        return;
-
-    et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
-    if (!et) {
-        if (ext->len) {
-            ext->len = 0;
-            update_inode_page(inode);
-        }
-        return;
-    }
-
-    read_lock(&et->lock);
-    if (et->count) {
-        struct extent_node *en;
-
-        if (et->cached_en) {
-            en = et->cached_en;
-        } else {
-            struct rb_node *node = rb_first(&et->root);
-
-            if (!node)
-                node = rb_last(&et->root);
-            en = rb_entry(node, struct extent_node, rb_node);
-        }
-
-        if (__is_extent_same(ext, &en->ei))
-            goto out;
-
-        *ext = en->ei;
-        sync = true;
-    } else if (ext->len) {
-        ext->len = 0;
-        sync = true;
-    }
-out:
-    read_unlock(&et->lock);
-    atomic_dec(&et->refcount);
-
-    if (sync)
-        update_inode_page(inode);
+
+    return !__is_extent_same(&prev, &et->largest);
 }

 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -772,8 +647,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
     struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
     struct extent_node *en, *tmp;
     unsigned long ino = F2FS_ROOT_INO(sbi);
-    struct radix_tree_iter iter;
-    void **slot;
+    struct radix_tree_root *root = &sbi->extent_tree_root;
     unsigned int found;
     unsigned int node_cnt = 0, tree_cnt = 0;
@@ -788,10 +662,10 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
     }
     spin_unlock(&sbi->extent_lock);

-    if (!down_read_trylock(&sbi->extent_tree_lock))
+    if (!down_write_trylock(&sbi->extent_tree_lock))
         goto out;

-    while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
+    while ((found = radix_tree_gang_lookup(root,
                 (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
         unsigned i;
@@ -799,27 +673,15 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
         for (i = 0; i < found; i++) {
             struct extent_tree *et = treevec[i];

-            atomic_inc(&et->refcount);
             write_lock(&et->lock);
             node_cnt += __free_extent_tree(sbi, et, false);
             write_unlock(&et->lock);
-            atomic_dec(&et->refcount);
-        }
-    }
-    up_read(&sbi->extent_tree_lock);
-
-    if (!down_write_trylock(&sbi->extent_tree_lock))
-        goto out;
-
-    radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
-                            F2FS_ROOT_INO(sbi)) {
-        struct extent_tree *et = (struct extent_tree *)*slot;
-
-        if (!atomic_read(&et->refcount) && !et->count) {
-            radix_tree_delete(&sbi->extent_tree_root, et->ino);
-            kmem_cache_free(extent_tree_slab, et);
-            sbi->total_ext_tree--;
-            tree_cnt++;
+
+            if (!atomic_read(&et->refcount) && !et->count) {
+                radix_tree_delete(root, et->ino);
+                kmem_cache_free(extent_tree_slab, et);
+                sbi->total_ext_tree--;
+                tree_cnt++;
+            }
         }
     }
     up_write(&sbi->extent_tree_lock);
@@ -829,63 +691,61 @@ out:
     return node_cnt + tree_cnt;
 }

-void f2fs_destroy_extent_tree(struct inode *inode)
+unsigned int f2fs_destroy_extent_node(struct inode *inode)
 {
     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-    struct extent_tree *et;
+    struct extent_tree *et = F2FS_I(inode)->extent_tree;
     unsigned int node_cnt = 0;

-    if (!test_opt(sbi, EXTENT_CACHE))
-        return;
-
-    et = __find_extent_tree(sbi, inode->i_ino);
     if (!et)
-        goto out;
+        return 0;

-    /* free all extent info belong to this extent tree */
     write_lock(&et->lock);
     node_cnt = __free_extent_tree(sbi, et, true);
     write_unlock(&et->lock);

-    atomic_dec(&et->refcount);
+    return node_cnt;
+}

-    /* try to find and delete extent tree entry in radix tree */
-    down_write(&sbi->extent_tree_lock);
-    et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
-    if (!et) {
-        up_write(&sbi->extent_tree_lock);
-        goto out;
+void f2fs_destroy_extent_tree(struct inode *inode)
+{
+    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+    struct extent_tree *et = F2FS_I(inode)->extent_tree;
+    unsigned int node_cnt = 0;
+
+    if (!et)
+        return;
+
+    if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
+        atomic_dec(&et->refcount);
+        return;
     }
+
+    /* free all extent info belong to this extent tree */
+    node_cnt = f2fs_destroy_extent_node(inode);
+
+    /* delete extent tree entry in radix tree */
+    down_write(&sbi->extent_tree_lock);
+    atomic_dec(&et->refcount);
     f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
     radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
     kmem_cache_free(extent_tree_slab, et);
     sbi->total_ext_tree--;
     up_write(&sbi->extent_tree_lock);
-out:
+
+    F2FS_I(inode)->extent_tree = NULL;
+
     trace_f2fs_destroy_extent_tree(inode, node_cnt);
     return;
 }

-void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
-{
-    if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-        f2fs_init_extent_tree(inode, i_ext);
-
-    write_lock(&F2FS_I(inode)->ext_lock);
-    get_extent_info(&F2FS_I(inode)->ext, *i_ext);
-    write_unlock(&F2FS_I(inode)->ext_lock);
-}
-
 static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
                             struct extent_info *ei)
 {
-    if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+    if (!f2fs_may_extent_tree(inode))
         return false;

-    if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-        return f2fs_lookup_extent_tree(inode, pgofs, ei);
-
-    return lookup_extent_info(inode, pgofs, ei);
+    return f2fs_lookup_extent_tree(inode, pgofs, ei);
 }

 void f2fs_update_extent_cache(struct dnode_of_data *dn)
@@ -893,19 +753,15 @@ void f2fs_update_extent_cache(struct dnode_of_data *dn)
     struct f2fs_inode_info *fi = F2FS_I(dn->inode);
     pgoff_t fofs;

-    f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
-    if (is_inode_flag_set(fi, FI_NO_EXTENT))
+    if (!f2fs_may_extent_tree(dn->inode))
         return;

+    f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
+
     fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
                             dn->ofs_in_node;

-    /* we should call update_extent_info() to update on-disk extent */
-    if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
-        f2fs_update_extent_tree(dn->inode, fofs, dn->data_blkaddr);
-
-    if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
+    if (f2fs_update_extent_tree(dn->inode, fofs, dn->data_blkaddr))
         sync_inode_page(dn);
 }
@@ -1109,8 +965,6 @@ alloc:
     allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
                             &sum, seg);
-
-    /* direct IO doesn't use extent cache to maximize the performance */
     set_data_blkaddr(dn);

     /* update i_size */
@@ -1119,6 +973,9 @@ alloc:
     if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
         i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

+    /* direct IO doesn't use extent cache to maximize the performance */
+    __drop_largest_extent(dn->inode, fofs);
+
     return 0;
 }


@@ -349,6 +349,7 @@ struct extent_tree {
     nid_t ino;                /* inode number */
     struct rb_root root;            /* root of extent info rb-tree */
     struct extent_node *cached_en;        /* recently accessed extent node */
+    struct extent_info largest;        /* largested extent info */
     rwlock_t lock;                /* protect extent info rb-tree */
     atomic_t refcount;            /* reference count of rb-tree */
     unsigned int count;            /* # of extent node in rb-tree*/
@@ -420,14 +421,14 @@ struct f2fs_inode_info {
     unsigned int clevel;        /* maximum level of given file name */
     nid_t i_xattr_nid;        /* node id that contains xattrs */
     unsigned long long xattr_ver;    /* cp version of xattr modification */
-    struct extent_info ext;        /* in-memory extent cache entry */
-    rwlock_t ext_lock;        /* rwlock for single extent cache */
     struct inode_entry *dirty_dir;    /* the pointer of dirty dir */

     struct radix_tree_root inmem_root;    /* radix tree for inmem pages */
     struct list_head inmem_pages;    /* inmemory pages managed by f2fs */
     struct mutex inmem_lock;    /* lock for inmemory pages */

+    struct extent_tree *extent_tree;    /* cached extent_tree entry */
+
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
     /* Encryption params */
     struct f2fs_crypt_info *i_crypt_info;
@@ -1548,6 +1549,17 @@ static inline bool is_dot_dotdot(const struct qstr *str)
     return false;
 }

+static inline bool f2fs_may_extent_tree(struct inode *inode)
+{
+    mode_t mode = inode->i_mode;
+
+    if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+            is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+        return false;
+
+    return S_ISREG(mode);
+}
+
 #define get_inode_mode(i) \
     ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
     (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -1755,10 +1767,10 @@ void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
+void f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+unsigned int f2fs_destroy_extent_node(struct inode *);
 void f2fs_destroy_extent_tree(struct inode *);
-void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *);
 void f2fs_update_extent_cache(struct dnode_of_data *);
-void f2fs_preserve_extent_tree(struct inode *);
 struct page *get_read_data_page(struct inode *, pgoff_t, int);
 struct page *find_data_page(struct inode *, pgoff_t);
 struct page *get_lock_data_page(struct inode *, pgoff_t);


@@ -139,7 +139,7 @@ static int do_read_inode(struct inode *inode)
     fi->i_pino = le32_to_cpu(ri->i_pino);
     fi->i_dir_level = ri->i_dir_level;

-    f2fs_init_extent_cache(inode, &ri->i_ext);
+    f2fs_init_extent_tree(inode, &ri->i_ext);

     get_inline_info(fi, ri);
@@ -237,10 +237,11 @@ void update_inode(struct inode *inode, struct page *node_page)
     ri->i_size = cpu_to_le64(i_size_read(inode));
     ri->i_blocks = cpu_to_le64(inode->i_blocks);

-    read_lock(&F2FS_I(inode)->ext_lock);
-    set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
-    read_unlock(&F2FS_I(inode)->ext_lock);
-
+    if (F2FS_I(inode)->extent_tree)
+        set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
+                            &ri->i_ext);
+    else
+        memset(&ri->i_ext, 0, sizeof(ri->i_ext));
     set_raw_inline(F2FS_I(inode), ri);

     ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
@@ -331,6 +332,8 @@ void f2fs_evict_inode(struct inode *inode)
     f2fs_bug_on(sbi, get_dirty_pages(inode));
     remove_dirty_dir_inode(inode);

+    f2fs_destroy_extent_tree(inode);
+
     if (inode->i_nlink || is_bad_inode(inode))
         goto no_delete;
@@ -350,11 +353,6 @@ no_delete:
     stat_dec_inline_dir(inode);
     stat_dec_inline_inode(inode);

-    /* update extent info in inode */
-    if (inode->i_nlink)
-        f2fs_preserve_extent_tree(inode);
-    f2fs_destroy_extent_tree(inode);
-
     invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
     if (xnid)
         invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);


@@ -65,6 +65,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
     if (f2fs_may_inline_dentry(inode))
         set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);

+    f2fs_init_extent_tree(inode, NULL);
+
     stat_inc_inline_inode(inode);
     stat_inc_inline_dir(inode);


@@ -117,6 +117,8 @@ void f2fs_join_shrinker(struct f2fs_sb_info *sbi)

 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
 {
+    f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
+
     spin_lock(&f2fs_list_lock);
     list_del(&sbi->s_list);
     spin_unlock(&f2fs_list_lock);


@@ -422,7 +422,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
     atomic_set(&fi->dirty_pages, 0);
     fi->i_current_depth = 1;
     fi->i_advise = 0;
-    rwlock_init(&fi->ext_lock);
     init_rwsem(&fi->i_sem);
     INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS);
     INIT_LIST_HEAD(&fi->inmem_pages);
@@ -453,12 +452,17 @@ static int f2fs_drop_inode(struct inode *inode)
      */
     if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
         if (!inode->i_nlink && !is_bad_inode(inode)) {
+            /* to avoid evict_inode call simultaneously */
+            atomic_inc(&inode->i_count);
             spin_unlock(&inode->i_lock);

             /* some remained atomic pages should discarded */
             if (f2fs_is_atomic_file(inode))
                 commit_inmem_pages(inode, true);

+            /* should remain fi->extent_tree for writepage */
+            f2fs_destroy_extent_node(inode);
+
             sb_start_intwrite(inode->i_sb);
             i_size_write(inode, 0);
@@ -473,6 +477,7 @@ static int f2fs_drop_inode(struct inode *inode)
                     F2FS_I(inode)->i_crypt_info);
 #endif
             spin_lock(&inode->i_lock);
+            atomic_dec(&inode->i_count);
         }
         return 0;
     }
@@ -721,6 +726,7 @@ static void default_options(struct f2fs_sb_info *sbi)
     set_opt(sbi, BG_GC);
     set_opt(sbi, INLINE_DATA);
+    set_opt(sbi, EXTENT_CACHE);
 #ifdef CONFIG_F2FS_FS_XATTR
     set_opt(sbi, XATTR_USER);