f2fs: extent cache: support unaligned extent
Compressed inodes may suffer from poor read performance because they cannot use the extent cache, so add unaligned extent support to improve this. Currently, it only works on readonly-format f2fs images.

Unaligned extent: in a compressed cluster, the number of physical blocks is smaller than the number of logical blocks, so an extra physical block length is added to the extent info to indicate this status.

The idea is that if all blocks of a cluster are physically contiguous, then once its mapping info is read for the first time, an unaligned (or aligned) extent info entry is cached in the extent cache, in the expectation that the mapping info will be hit when the cluster is reread.

Merge policy:
- Aligned extents can be merged.
- An aligned extent and an unaligned extent cannot be merged.

Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
parent 6b3ba1e77d
commit 94afd6d6e5
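The merge policy described in the commit message above can be illustrated with a minimal standalone C sketch. The types and helpers below (struct ext, is_unaligned(), mergeable()) are simplified, hypothetical stand-ins, not the kernel's struct extent_info or __is_extent_mergeable(): an extent whose physical length (c_len) differs from its logical length (len) is treated as unaligned and is never merged, while aligned extents merge only when they are both logically and physically contiguous.

/*
 * Illustrative sketch only (hypothetical names); it mirrors the merge
 * rule added by this patch but is not the kernel implementation.
 */
#include <stdbool.h>
#include <stdio.h>

struct ext {
	unsigned int fofs;	/* start offset in the file (logical) */
	unsigned int len;	/* logical length of the extent */
	unsigned int blk;	/* start physical block address */
	unsigned int c_len;	/* physical length; set for compressed clusters */
};

/* unaligned: the physical length is shorter than the logical length */
static bool is_unaligned(const struct ext *e)
{
	return e->c_len && e->c_len != e->len;
}

static bool mergeable(const struct ext *back, const struct ext *front)
{
	/* unaligned extents are never merged with anything */
	if (is_unaligned(back) || is_unaligned(front))
		return false;
	/* aligned extents merge only if logically and physically contiguous */
	return back->fofs + back->len == front->fofs &&
	       back->blk + back->len == front->blk;
}

int main(void)
{
	struct ext a = { .fofs = 0, .len = 4, .blk = 100, .c_len = 0 };	/* aligned */
	struct ext b = { .fofs = 4, .len = 4, .blk = 104, .c_len = 0 };	/* aligned, contiguous with a */
	struct ext c = { .fofs = 8, .len = 4, .blk = 108, .c_len = 2 };	/* 4 logical blocks in 2 physical blocks */

	printf("a+b mergeable: %d\n", mergeable(&a, &b));	/* prints 1 */
	printf("b+c mergeable: %d\n", mergeable(&b, &c));	/* prints 0, c is unaligned */
	return 0;
}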
fs/f2fs/compress.c
@@ -1666,6 +1666,30 @@ void f2fs_put_page_dic(struct page *page)
 	f2fs_put_dic(dic);
 }
 
+/*
+ * check whether cluster blocks are contiguous, and add extent cache entry
+ * only if cluster blocks are logically and physically contiguous.
+ */
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
+{
+	bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
+	int i = compressed ? 1 : 0;
+	block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
+						dn->ofs_in_node + i);
+
+	for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
+		block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+						dn->ofs_in_node + i);
+
+		if (!__is_valid_data_blkaddr(blkaddr))
+			break;
+		if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
+			return 0;
+	}
+
+	return compressed ? i - 1 : i;
+}
+
 const struct address_space_operations f2fs_compress_aops = {
 	.releasepage = f2fs_release_page,
 	.invalidatepage = f2fs_invalidate_page,
fs/f2fs/data.c
@@ -1135,7 +1135,7 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 
 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
 {
-	struct extent_info ei = {0, 0, 0};
+	struct extent_info ei = {0, };
 	struct inode *inode = dn->inode;
 
 	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
@@ -1152,7 +1152,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
 	struct address_space *mapping = inode->i_mapping;
 	struct dnode_of_data dn;
 	struct page *page;
-	struct extent_info ei = {0,0,0};
+	struct extent_info ei = {0, };
 	int err;
 
 	page = f2fs_grab_cache_page(mapping, index, for_write);
@@ -1450,7 +1450,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	int err = 0, ofs = 1;
 	unsigned int ofs_in_node, last_ofs_in_node;
 	blkcnt_t prealloc;
-	struct extent_info ei = {0,0,0};
+	struct extent_info ei = {0, };
 	block_t blkaddr;
 	unsigned int start_pgofs;
 
@@ -2126,6 +2126,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	sector_t last_block_in_file;
 	const unsigned blocksize = blks_to_bytes(inode, 1);
 	struct decompress_io_ctx *dic = NULL;
+	struct extent_info ei = {0, };
+	bool from_dnode = true;
 	int i;
 	int ret = 0;
 
@@ -2156,6 +2158,12 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 	if (f2fs_cluster_is_empty(cc))
 		goto out;
 
+	if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
+		from_dnode = false;
+
+	if (!from_dnode)
+		goto skip_reading_dnode;
+
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
 	if (ret)
@@ -2163,11 +2171,13 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 
 	f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
 
+skip_reading_dnode:
 	for (i = 1; i < cc->cluster_size; i++) {
 		block_t blkaddr;
 
-		blkaddr = data_blkaddr(dn.inode, dn.node_page,
-					dn.ofs_in_node + i);
+		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+					dn.ofs_in_node + i) :
+					ei.blk + i - 1;
 
 		if (!__is_valid_data_blkaddr(blkaddr))
 			break;
@@ -2177,6 +2187,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 			goto out_put_dnode;
 		}
 		cc->nr_cpages++;
+
+		if (!from_dnode && i >= ei.c_len)
+			break;
 	}
 
 	/* nothing to decompress */
@@ -2196,8 +2209,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		block_t blkaddr;
 		struct bio_post_read_ctx *ctx;
 
-		blkaddr = data_blkaddr(dn.inode, dn.node_page,
-					dn.ofs_in_node + i + 1);
+		blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
+					dn.ofs_in_node + i + 1) :
+					ei.blk + i;
 
 		f2fs_wait_on_block_writeback(inode, blkaddr);
 
@@ -2242,13 +2256,15 @@ submit_and_realloc:
 		*last_block_in_bio = blkaddr;
 	}
 
-	f2fs_put_dnode(&dn);
+	if (from_dnode)
+		f2fs_put_dnode(&dn);
 
 	*bio_ret = bio;
 	return 0;
 
 out_put_dnode:
-	f2fs_put_dnode(&dn);
+	if (from_dnode)
+		f2fs_put_dnode(&dn);
 out:
 	for (i = 0; i < cc->cluster_size; i++) {
 		if (cc->rpages[i]) {
@@ -2543,7 +2559,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
 	struct page *page = fio->page;
 	struct inode *inode = page->mapping->host;
 	struct dnode_of_data dn;
-	struct extent_info ei = {0,0,0};
+	struct extent_info ei = {0, };
 	struct node_info ni;
 	bool ipu_force = false;
 	int err = 0;
@@ -3218,7 +3234,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
 	struct dnode_of_data dn;
 	struct page *ipage;
 	bool locked = false;
-	struct extent_info ei = {0,0,0};
+	struct extent_info ei = {0, };
 	int err = 0;
 	int flag;
 
fs/f2fs/extent_cache.c
@@ -661,6 +661,47 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
 	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int c_len)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en = NULL;
+	struct extent_node *prev_en = NULL, *next_en = NULL;
+	struct extent_info ei;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	bool leftmost = false;
+
+	trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen);
+
+	/* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
+	if (is_inode_flag_set(inode, FI_NO_EXTENT))
+		return;
+
+	write_lock(&et->lock);
+
+	en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
+				(struct rb_entry *)et->cached_en, fofs,
+				(struct rb_entry **)&prev_en,
+				(struct rb_entry **)&next_en,
+				&insert_p, &insert_parent, false,
+				&leftmost);
+	if (en)
+		goto unlock_out;
+
+	set_extent_info(&ei, fofs, blkaddr, llen);
+	ei.c_len = c_len;
+
+	if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+		__insert_extent_tree(sbi, et, &ei,
+				insert_p, insert_parent, leftmost);
+unlock_out:
+	write_unlock(&et->lock);
+}
+#endif
+
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct extent_tree *et, *next;
fs/f2fs/f2fs.h
@@ -580,6 +580,9 @@ struct extent_info {
 	unsigned int fofs;		/* start offset in a file */
 	unsigned int len;		/* length of the extent */
 	u32 blk;			/* start block address of the extent */
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	unsigned int c_len;		/* physical extent length of compressed blocks */
+#endif
 };
 
 struct extent_node {
@@ -799,6 +802,9 @@ static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
 	ei->fofs = fofs;
 	ei->blk = blk;
 	ei->len = len;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	ei->c_len = 0;
+#endif
 }
 
 static inline bool __is_discard_mergeable(struct discard_info *back,
@@ -823,6 +829,12 @@ static inline bool __is_discard_front_mergeable(struct discard_info *cur,
 static inline bool __is_extent_mergeable(struct extent_info *back,
 						struct extent_info *front)
 {
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+	if (back->c_len && back->len != back->c_len)
+		return false;
+	if (front->c_len && front->len != front->c_len)
+		return false;
+#endif
 	return (back->fofs + back->len == front->fofs &&
 			back->blk + back->len == front->blk);
 }
@@ -4068,12 +4080,16 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
 				struct writeback_control *wbc,
 				enum iostat_type io_type);
 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
+void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int c_len);
 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 				unsigned nr_pages, sector_t *last_block_in_bio,
 				bool is_readahead, bool for_write);
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
 void f2fs_put_page_dic(struct page *page);
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
@@ -4128,6 +4144,7 @@ static inline void f2fs_put_page_dic(struct page *page)
 {
 	WARN_ON_ONCE(1);
 }
+static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
@@ -4143,6 +4160,9 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
 static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
 				nid_t ino) { }
 #define inc_compr_inode_stat(inode)		do { } while (0)
+static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int llen,
+				unsigned int c_len) { }
 #endif
 
 static inline void set_compress_context(struct inode *inode)
fs/f2fs/node.c
@@ -841,6 +841,26 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 	dn->ofs_in_node = offset[level];
 	dn->node_page = npage[level];
 	dn->data_blkaddr = f2fs_data_blkaddr(dn);
+
+	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
+					f2fs_sb_has_readonly(sbi)) {
+		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
+		block_t blkaddr;
+
+		if (!c_len)
+			goto out;
+
+		blkaddr = f2fs_data_blkaddr(dn);
+		if (blkaddr == COMPRESS_ADDR)
+			blkaddr = data_blkaddr(dn->inode, dn->node_page,
+						dn->ofs_in_node + 1);
+
+		f2fs_update_extent_tree_range_compressed(dn->inode,
+					index, blkaddr,
+					F2FS_I(dn->inode)->i_cluster_size,
+					c_len);
+	}
+out:
 	return 0;
 
 release_pages: