btrfs: embed data_ref and tree_ref in btrfs_delayed_ref_node
We have been embedding btrfs_delayed_ref_node in btrfs_delayed_data_ref and btrfs_delayed_tree_ref, which leaves us with two sets of cachep's and a variety of handling that is awkward because of this separation. Instead, union these two members inside of btrfs_delayed_ref_node and make that the first class object. This allows us to go down to one cachep for our delayed ref nodes instead of two.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 0eea355fc0
commit d3fbb00f5e
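To make the layout change concrete, here is a minimal, self-contained userspace sketch of the pattern the commit describes, not kernel code: malloc() stands in for kmem_cache_alloc(), container_of() is defined locally, and the structs are trimmed to a few fields for illustration. It shows the union embedded in the node and the two conversion helpers swapping direction, so one allocation covers either ref flavour.

/*
 * Userspace sketch of the embedded-union layout (illustrative only).
 * malloc() stands in for kmem_cache_alloc(btrfs_delayed_ref_node_cachep).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct btrfs_delayed_tree_ref {
	uint64_t root;
	uint64_t parent;
	int level;
};

struct btrfs_delayed_data_ref {
	uint64_t root;
	uint64_t parent;
	uint64_t objectid;
	uint64_t offset;
};

/* After the patch, the type-specific data lives in a union inside the node. */
struct btrfs_delayed_ref_node {
	uint64_t bytenr;
	unsigned int action:8;
	unsigned int type:8;
	union {
		struct btrfs_delayed_tree_ref tree_ref;
		struct btrfs_delayed_data_ref data_ref;
	};
};

/* Both directions of the conversion helpers from the header hunk below. */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return &node->tree_ref;
}

static inline struct btrfs_delayed_ref_node *
btrfs_delayed_tree_ref_to_node(struct btrfs_delayed_tree_ref *ref)
{
	return container_of(ref, struct btrfs_delayed_ref_node, tree_ref);
}

int main(void)
{
	/* One allocation now covers the node plus whichever ref flavour is used. */
	struct btrfs_delayed_ref_node *node = malloc(sizeof(*node));

	if (!node)
		return 1;

	struct btrfs_delayed_tree_ref *ref = btrfs_delayed_node_to_tree_ref(node);

	ref->root = 5;
	ref->level = 0;

	/* Round-trips back to the same object, so one cache and one free suffice. */
	printf("same object: %d\n", btrfs_delayed_tree_ref_to_node(ref) == node);
	free(node);
	return 0;
}

Because a delayed ref is only ever one flavour at a time, the union costs each node only the size of the larger member, while removing the second slab cache and the per-type free paths visible in the diff below.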
fs/btrfs/delayed-ref.c

@@ -16,8 +16,7 @@
 #include "fs.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
-struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_ref_node_cachep;
 struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking.  For subvolume trees
@@ -1082,26 +1081,26 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID);
 
 	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
-	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
-	if (!ref)
+	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
+	if (!node)
 		return -ENOMEM;
 
 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 		return -ENOMEM;
 	}
 
 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
 		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
-			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+			kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 			return -ENOMEM;
 		}
 	}
 
-	node = btrfs_delayed_tree_ref_to_node(ref);
+	ref = btrfs_delayed_node_to_tree_ref(node);
 
 	if (parent)
 		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
@@ -1143,7 +1142,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 				   action == BTRFS_ADD_DELAYED_EXTENT ?
 				   BTRFS_ADD_DELAYED_REF : action);
 	if (merged)
-		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 
 	if (qrecord_inserted)
 		btrfs_qgroup_trace_extent_post(trans, record);
@@ -1176,11 +1175,11 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 	u8 ref_type;
 
 	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
-	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
-	if (!ref)
+	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
+	if (!node)
 		return -ENOMEM;
 
-	node = btrfs_delayed_data_ref_to_node(ref);
+	ref = btrfs_delayed_node_to_data_ref(node);
 
 	if (parent)
 		ref_type = BTRFS_SHARED_DATA_REF_KEY;
@@ -1196,14 +1195,14 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 
 	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 	if (!head_ref) {
-		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 		return -ENOMEM;
 	}
 
 	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
 		record = kzalloc(sizeof(*record), GFP_NOFS);
 		if (!record) {
-			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+			kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 			kmem_cache_free(btrfs_delayed_ref_head_cachep,
 					head_ref);
 			return -ENOMEM;
@@ -1237,7 +1236,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 				   action == BTRFS_ADD_DELAYED_EXTENT ?
 				   BTRFS_ADD_DELAYED_REF : action);
 	if (merged)
-		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
 
 
 	if (qrecord_inserted)
@@ -1280,18 +1279,7 @@ void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 {
 	if (refcount_dec_and_test(&ref->refs)) {
 		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
-		switch (ref->type) {
-		case BTRFS_TREE_BLOCK_REF_KEY:
-		case BTRFS_SHARED_BLOCK_REF_KEY:
-			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
-			break;
-		case BTRFS_EXTENT_DATA_REF_KEY:
-		case BTRFS_SHARED_DATA_REF_KEY:
-			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
-			break;
-		default:
-			BUG();
-		}
+		kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
 	}
 }
 
@@ -1310,8 +1298,7 @@ btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 byt
 void __cold btrfs_delayed_ref_exit(void)
 {
 	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
-	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
-	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+	kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
 	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
 }
 
@@ -1321,12 +1308,8 @@ int __init btrfs_delayed_ref_init(void)
 	if (!btrfs_delayed_ref_head_cachep)
 		goto fail;
 
-	btrfs_delayed_tree_ref_cachep = KMEM_CACHE(btrfs_delayed_tree_ref, 0);
-	if (!btrfs_delayed_tree_ref_cachep)
-		goto fail;
-
-	btrfs_delayed_data_ref_cachep = KMEM_CACHE(btrfs_delayed_data_ref, 0);
-	if (!btrfs_delayed_data_ref_cachep)
+	btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
+	if (!btrfs_delayed_ref_node_cachep)
 		goto fail;
 
 	btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
fs/btrfs/delayed-ref.h

@@ -30,6 +30,19 @@ enum btrfs_delayed_ref_action {
 	BTRFS_UPDATE_DELAYED_HEAD,
 } __packed;
 
+struct btrfs_delayed_tree_ref {
+	u64 root;
+	u64 parent;
+	int level;
+};
+
+struct btrfs_delayed_data_ref {
+	u64 root;
+	u64 parent;
+	u64 objectid;
+	u64 offset;
+};
+
 struct btrfs_delayed_ref_node {
 	struct rb_node ref_node;
 	/*
@@ -64,6 +77,11 @@ struct btrfs_delayed_ref_node {
 
 	unsigned int action:8;
 	unsigned int type:8;
+
+	union {
+		struct btrfs_delayed_tree_ref tree_ref;
+		struct btrfs_delayed_data_ref data_ref;
+	};
 };
 
 struct btrfs_delayed_extent_op {
@@ -151,21 +169,6 @@ struct btrfs_delayed_ref_head {
 	bool processing;
 };
 
-struct btrfs_delayed_tree_ref {
-	struct btrfs_delayed_ref_node node;
-	u64 root;
-	u64 parent;
-	int level;
-};
-
-struct btrfs_delayed_data_ref {
-	struct btrfs_delayed_ref_node node;
-	u64 root;
-	u64 parent;
-	u64 objectid;
-	u64 offset;
-};
-
 enum btrfs_delayed_ref_flags {
 	/* Indicate that we are flushing delayed refs for the commit */
 	BTRFS_DELAYED_REFS_FLUSHING,
@@ -279,8 +282,7 @@ struct btrfs_ref {
 };
 
 extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
-extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_ref_node_cachep;
 extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
 
 int __init btrfs_delayed_ref_init(void);
@@ -404,25 +406,25 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
 static inline struct btrfs_delayed_tree_ref *
 btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
 {
-	return container_of(node, struct btrfs_delayed_tree_ref, node);
+	return &node->tree_ref;
 }
 
 static inline struct btrfs_delayed_data_ref *
 btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
 {
-	return container_of(node, struct btrfs_delayed_data_ref, node);
+	return &node->data_ref;
 }
 
 static inline struct btrfs_delayed_ref_node *
 btrfs_delayed_tree_ref_to_node(struct btrfs_delayed_tree_ref *ref)
 {
-	return &ref->node;
+	return container_of(ref, struct btrfs_delayed_ref_node, tree_ref);
 }
 
 static inline struct btrfs_delayed_ref_node *
 btrfs_delayed_data_ref_to_node(struct btrfs_delayed_data_ref *ref)
 {
-	return &ref->node;
+	return container_of(ref, struct btrfs_delayed_ref_node, data_ref);
 }
 
 #endif