/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifndef __DISKIO__
#define __DISKIO__

#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
#define BTRFS_SUPER_INFO_SIZE 4096

#define BTRFS_SUPER_MIRROR_MAX 3
#define BTRFS_SUPER_MIRROR_SHIFT 12

static inline u64 btrfs_sb_offset(int mirror)
{
	u64 start = 16 * 1024;

	if (mirror)
		return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
	return BTRFS_SUPER_INFO_OFFSET;
}
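
/*
 * Worked example (editorial; derived from the definitions above): with
 * BTRFS_SUPER_MIRROR_SHIFT == 12 the superblock copies land at
 *
 *	btrfs_sb_offset(0) == 64K  (BTRFS_SUPER_INFO_OFFSET)
 *	btrfs_sb_offset(1) == 16K << 12 == 64M
 *	btrfs_sb_offset(2) == 16K << 24 == 256G
 *
 * so a scan walks mirrors 0 through BTRFS_SUPER_MIRROR_MAX - 1 and
 * skips any copy that falls past the end of the device.
 */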

struct btrfs_device;
struct btrfs_fs_devices;

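/*
 * Note (editorial, based on how disk-io.c uses these declarations):
 * parent_transid is the generation the parent block recorded for this
 * child; when nonzero it is compared against the child's header
 * generation after the read, so a stale copy can be rejected and the
 * read retried from another mirror. Passing 0 skips the check.
 */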
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize, u64 parent_transid);
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			 u64 parent_transid);
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
			     int mirror_num, struct extent_buffer **eb);
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize);
void clean_tree_block(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
	       struct btrfs_fs_devices *fs_devices,
	       char *options);
int close_ctree(struct btrfs_root *root);
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_commit_super(struct btrfs_root *root);
int btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
					       struct btrfs_key *location);
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location);
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
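
/*
 * Editorial note, condensed from the commit message that introduced
 * delayed inode items: directory name index insertions/deletions and
 * inode updates are batched in per-inode "delayed nodes" and flushed by
 * a worker. The balance helpers below throttle callers when too many
 * delayed items accumulate; the __ variant skips the delayed-item
 * balancing step.
 */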
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
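
/*
 * Illustrative use of the checksum pair (a sketch of the pattern in
 * disk-io.c, not a required calling convention): seed the running crc
 * with ~0, feed each chunk through btrfs_csum_data(), then fold the
 * result into its on-disk form with btrfs_csum_final():
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(root, data, crc, len);
 *	btrfs_csum_final(crc, (char *)&disk_csum);
 */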
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
			int metadata);
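
/*
 * Editorial note, condensed from the "Btrfs: Add ordered async work
 * queues" commit message: cpu-intensive work such as checksumming runs
 * on async worker threads, and an ordered work queue completes entries
 * in submission order; submit_bio_start does the cpu-heavy part and
 * submit_bio_done sends the bio down in the original order.
 */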
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
			int rw, struct bio *bio, int mirror_num,
			unsigned long bio_flags, u64 bio_offset,
			extent_submit_bio_hook_t *submit_bio_start,
			extent_submit_bio_hook_t *submit_bio_done);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf);
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info);
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root);
int btrfs_cleanup_transaction(struct btrfs_root *root);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
				   struct btrfs_root *root);
void btrfs_abort_devices(struct btrfs_root *root);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     u64 objectid);
int btree_lock_page_hook(struct page *page, void *data,
			 void (*flush_fn)(void *));

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
void btrfs_set_buffer_lockdep_class(u64 objectid,
				    struct extent_buffer *eb, int level);
#else
static inline void btrfs_init_lockdep(void)
{ }
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
					struct extent_buffer *eb, int level)
{
}
#endif

#endif