Btrfs: Add a mount option to control worker thread pool size
mount -o thread_pool=<num> changes the default, which is min(num_cpus + 2, 8). Larger thread pools would make more sense on very large disk arrays.

This mount option controls the max size of each thread pool. There are multiple thread pools, so the total worker count will be larger than the mount option.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 35d8ba6629
commit 4543df7ecc
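For reference, the sizing rule above condenses into a minimal userspace sketch (plain C with illustrative names, not kernel code): the default is min(num_cpus + 2, 8), and a positive thread_pool=N value simply replaces it.

/*
 * Sketch of the pool-size rule described in the commit message.
 * default_pool_size() and effective_pool_size() are hypothetical
 * helpers for illustration only.
 */
#include <stdio.h>
#include <unistd.h>

static int default_pool_size(void)
{
	long cpus = sysconf(_SC_NPROCESSORS_ONLN);

	if (cpus < 1)
		cpus = 1;
	return (int)(cpus + 2 < 8 ? cpus + 2 : 8);
}

static int effective_pool_size(int thread_pool_opt)
{
	/* a zero/absent option keeps the default, mirroring the patch */
	return thread_pool_opt > 0 ? thread_pool_opt : default_pool_size();
}

int main(void)
{
	printf("default: %d\n", default_pool_size());
	printf("with thread_pool=32: %d\n", effective_pool_size(32));
	return 0;
}

Built standalone, this prints the machine's default and then the overridden value, mirroring how the patch seeds the default and lets the mount option override it.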
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -532,6 +532,7 @@ struct btrfs_fs_info {
	 */
 	struct btrfs_workers workers;
 	struct btrfs_workers endio_workers;
+	int thread_pool_size;
 
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
 	struct work_struct trans_work;
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1117,6 +1117,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 						  GFP_NOFS);
 	int ret;
 	int err = -EINVAL;
+
 	struct btrfs_super_block *disk_super;
 
 	if (!extent_root || !tree_root || !fs_info) {
@@ -1148,6 +1149,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->btree_inode = new_inode(sb);
 	fs_info->btree_inode->i_ino = 1;
 	fs_info->btree_inode->i_nlink = 1;
+	fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
 
 	sb->s_blocksize = 4096;
 	sb->s_blocksize_bits = blksize_bits(4096);
@@ -1195,19 +1197,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->trans_mutex);
 	mutex_init(&fs_info->fs_mutex);
-
-	/* we need to start all the end_io workers up front because the
-	 * queue work function gets called at interrupt time. The endio
-	 * workers don't normally start IO, so some number of them <= the
-	 * number of cpus is fine. They handle checksumming after a read.
-	 *
-	 * The other worker threads do start IO, so the max is larger than
-	 * the number of CPUs. FIXME, tune this for huge machines
-	 */
-	btrfs_init_workers(&fs_info->workers, num_online_cpus() * 2);
-	btrfs_init_workers(&fs_info->endio_workers, num_online_cpus());
-	btrfs_start_workers(&fs_info->workers, 1);
-	btrfs_start_workers(&fs_info->endio_workers, num_online_cpus());
 
 #if 0
 	ret = add_hasher(fs_info, "crc32c");
 	if (ret) {
@@ -1238,6 +1227,17 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	if (err)
 		goto fail_sb_buffer;
 
+	/*
+	 * we need to start all the end_io workers up front because the
+	 * queue work function gets called at interrupt time, and so it
+	 * cannot dynamically grow.
+	 */
+	btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
+	btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+	btrfs_start_workers(&fs_info->workers, 1);
+	btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
+
+
 	err = -EINVAL;
 	if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
 		printk("Btrfs: wanted %llu devices, but found %llu\n",
@@ -1341,10 +1341,10 @@ fail_sys_array:
 	mutex_unlock(&fs_info->fs_mutex);
 fail_sb_buffer:
 	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
-fail_iput:
-	iput(fs_info->btree_inode);
 	btrfs_stop_workers(&fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
+fail_iput:
+	iput(fs_info->btree_inode);
 fail:
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
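The comment in the open_ctree() hunk above notes that the endio pool has to be started at full size because its queue function runs at interrupt time and the pool therefore cannot grow on demand. The sketch below (plain pthreads with illustrative names, not the btrfs_workers API) shows the lazily growing pattern the other pools can use: a hard cap, standing in for thread_pool_size, plus on-demand spawning in the queue path, which is exactly the step that is off-limits in interrupt context.

/* Illustrative worker pool with a cap; build with: cc -pthread pool.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct work {
	void (*fn)(void *arg);
	void *arg;
	struct work *next;
};

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t more;
	struct work *head;
	int max_workers;	/* cap, playing the role of thread_pool_size */
	int num_workers;	/* threads spawned so far */
};

static void *worker_loop(void *p)
{
	struct pool *pool = p;

	pthread_mutex_lock(&pool->lock);
	for (;;) {
		while (!pool->head)
			pthread_cond_wait(&pool->more, &pool->lock);

		struct work *w = pool->head;
		pool->head = w->next;
		pthread_mutex_unlock(&pool->lock);

		w->fn(w->arg);		/* run the job outside the lock */
		free(w);

		pthread_mutex_lock(&pool->lock);
	}
	return NULL;
}

/*
 * Queue a job and, if the pool is still below its cap, spawn one more
 * worker.  Spawning can sleep and allocate, which is the part a real
 * interrupt-time queue function could not do.
 */
static void queue_work(struct pool *pool, void (*fn)(void *), void *arg)
{
	struct work *w = malloc(sizeof(*w));

	if (!w)
		return;
	w->fn = fn;
	w->arg = arg;

	pthread_mutex_lock(&pool->lock);
	w->next = pool->head;
	pool->head = w;
	if (pool->num_workers < pool->max_workers) {
		pthread_t tid;

		if (pthread_create(&tid, NULL, worker_loop, pool) == 0) {
			pthread_detach(tid);
			pool->num_workers++;
		}
	}
	pthread_cond_signal(&pool->more);
	pthread_mutex_unlock(&pool->lock);
}

static void say(void *arg)
{
	printf("handled %s\n", (const char *)arg);
}

int main(void)
{
	static struct pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.more = PTHREAD_COND_INITIALIZER,
		.max_workers = 4,	/* as if mounted with thread_pool=4 */
	};

	queue_work(&pool, say, "one");
	queue_work(&pool, say, "two");
	sleep(1);			/* crude wait for the detached workers */
	return 0;
}

Starting the endio pool at its full size up front trades a few idle threads for never having to take the spawn path from interrupt context.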
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -67,7 +67,7 @@ static void btrfs_put_super (struct super_block * sb)
 enum {
 	Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
 	Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
-	Opt_ssd, Opt_err,
+	Opt_ssd, Opt_thread_pool, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -80,6 +80,7 @@ static match_table_t tokens = {
 	{Opt_max_extent, "max_extent=%s"},
 	{Opt_max_inline, "max_inline=%s"},
 	{Opt_alloc_start, "alloc_start=%s"},
+	{Opt_thread_pool, "thread_pool=%d"},
 	{Opt_ssd, "ssd"},
 	{Opt_err, NULL}
 };
@@ -118,6 +119,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 	struct btrfs_fs_info *info = root->fs_info;
 	substring_t args[MAX_OPT_ARGS];
 	char *p, *num;
+	int intarg;
 
 	if (!options)
 		return 0;
@@ -166,6 +168,15 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 			printk(KERN_INFO "btrfs: turning off barriers\n");
 			btrfs_set_opt(info->mount_opt, NOBARRIER);
 			break;
+		case Opt_thread_pool:
+			intarg = 0;
+			match_int(&args[0], &intarg);
+			if (intarg) {
+				info->thread_pool_size = intarg;
+				printk(KERN_INFO "btrfs: thread pool %d\n",
+				       info->thread_pool_size);
+			}
+			break;
 		case Opt_max_extent:
 			num = match_strdup(&args[0]);
 			if (num) {
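The Opt_thread_pool case above accepts the value from match_int() and only overrides the default when it is nonzero. A userspace approximation of that behavior, using strsep()/strtol() in place of the kernel's match_token()/match_int() (parse_thread_pool is a hypothetical helper for illustration):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* scan a comma-separated mount option string for thread_pool=N */
static int parse_thread_pool(const char *options, int def)
{
	char *dup = strdup(options);
	char *p, *cur = dup;
	int size = def;

	while ((p = strsep(&cur, ",")) != NULL) {
		if (strncmp(p, "thread_pool=", 12) == 0) {
			int n = (int)strtol(p + 12, NULL, 10);

			if (n > 0)	/* zero or garbage keeps the default */
				size = n;
		}
	}
	free(dup);
	return size;
}

int main(void)
{
	printf("%d\n", parse_thread_pool("ssd,thread_pool=16,max_inline=4096", 8));
	printf("%d\n", parse_thread_pool("ssd", 8));
	return 0;
}

With a default of 8, the first call prints 16 and the second keeps 8, matching the "only override when a positive value was parsed" behavior of the patch.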