btrfs: add nesting tags to the locking helpers

We will need these when we switch to an rwsem, so plumb in the
infrastructure here to use later on.  I violate the 80 character limit
in some places here because it'll be cleaned up later.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Josef Bacik 2020-08-20 11:46:02 -04:00 committed by David Sterba
parent 51899412dd
commit fd7ba1c120
3 changed files with 42 additions and 8 deletions
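
To make the intent concrete, here is a minimal sketch of where this plumbing is headed once the tree lock is converted to an rwsem: the nesting tag maps directly onto a lockdep subclass passed to down_write_nested()/down_read_nested(). The rw_semaphore member of extent_buffer and the helper bodies below are assumptions about that later conversion, not part of this patch.

#include <linux/rwsem.h>

/* Hypothetical post-conversion helpers; assumes eb->lock has become a
 * struct rw_semaphore (it is still an rwlock_t in this patch). */
static void sketch_tree_lock(struct extent_buffer *eb,
			     enum btrfs_lock_nesting nest)
{
	/* Each nesting tag becomes a distinct lockdep subclass, so taking
	 * a second tree lock while already holding one is not reported as
	 * a deadlock. */
	down_write_nested(&eb->lock, nest);
}

static void sketch_tree_read_lock(struct extent_buffer *eb,
				  enum btrfs_lock_nesting nest)
{
	down_read_nested(&eb->lock, nest);
}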

fs/btrfs/ctree.c

@@ -2875,7 +2875,8 @@ cow_done:
 		} else {
 			if (!btrfs_tree_read_lock_atomic(b)) {
 				btrfs_set_path_blocking(p);
-				__btrfs_tree_read_lock(b, p->recurse);
+				__btrfs_tree_read_lock(b, BTRFS_NESTING_NORMAL,
+						       p->recurse);
 			}
 			p->locks[level] = BTRFS_READ_LOCK;
 		}
@@ -5453,7 +5454,9 @@ again:
 			}
 			if (!ret) {
 				btrfs_set_path_blocking(path);
-				__btrfs_tree_read_lock(next, path->recurse);
+				__btrfs_tree_read_lock(next,
+						       BTRFS_NESTING_NORMAL,
+						       path->recurse);
 			}
 			next_rw_lock = BTRFS_READ_LOCK;
 		}
@@ -5488,7 +5491,9 @@ again:
 			ret = btrfs_try_tree_read_lock(next);
 			if (!ret) {
 				btrfs_set_path_blocking(path);
-				__btrfs_tree_read_lock(next, path->recurse);
+				__btrfs_tree_read_lock(next,
+						       BTRFS_NESTING_NORMAL,
+						       path->recurse);
 			}
 			next_rw_lock = BTRFS_READ_LOCK;
 		}

fs/btrfs/locking.c

@@ -244,7 +244,8 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
  *
  * The rwlock is held upon exit.
  */
-void __btrfs_tree_read_lock(struct extent_buffer *eb, bool recurse)
+void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
+			    bool recurse)
 {
 	u64 start_ns = 0;
@@ -282,7 +283,7 @@ again:
 
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
-	__btrfs_tree_read_lock(eb, false);
+	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, false);
 }
 
 /*
@@ -415,7 +416,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
  *
  * The rwlock is held for write upon exit.
  */
-void btrfs_tree_lock(struct extent_buffer *eb)
+void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
 	__acquires(&eb->lock)
 {
 	u64 start_ns = 0;
@@ -440,6 +441,11 @@ again:
 	trace_btrfs_tree_lock(eb, start_ns);
 }
 
+void btrfs_tree_lock(struct extent_buffer *eb)
+{
+	__btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
+}
+
 /*
  * Release the write lock, either blocking or spinning (ie. there's no need
  * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
@@ -565,7 +571,7 @@ struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
 
 	while (1) {
 		eb = btrfs_root_node(root);
-		__btrfs_tree_read_lock(eb, recurse);
+		__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, recurse);
 		if (eb == root->node)
 			break;
 		btrfs_tree_read_unlock(eb);
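
A caller-side sketch of how the two write-lock entry points added above are meant to be used. The parent/child pairing and the comment about a distinct tag are assumptions about later patches in this series, not something this patch introduces; only the function names come from the patch itself.

static void sketch_lock_parent_and_child(struct extent_buffer *parent,
					 struct extent_buffer *child)
{
	/* Existing callers are unchanged: btrfs_tree_lock() now simply
	 * forwards BTRFS_NESTING_NORMAL. */
	btrfs_tree_lock(parent);

	/* A caller that already holds a tree lock can pass an explicit
	 * nesting tag; later patches are expected to add tags other than
	 * BTRFS_NESTING_NORMAL for exactly this kind of nested acquisition. */
	__btrfs_tree_lock(child, BTRFS_NESTING_NORMAL);

	/* ... modify the buffers ... */

	btrfs_tree_unlock(child);
	btrfs_tree_unlock(parent);
}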

fs/btrfs/locking.h

@@ -16,12 +16,35 @@
 #define BTRFS_WRITE_LOCK_BLOCKING 3
 #define BTRFS_READ_LOCK_BLOCKING 4
 
+/*
+ * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
+ * the time of this patch is 8, which is how many we use.  Keep this in mind if
+ * you decide you want to add another subclass.
+ */
+enum btrfs_lock_nesting {
+	BTRFS_NESTING_NORMAL,
+
+	/*
+	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
+	 * add this in here and add a static_assert to keep us from going over
+	 * the limit.  As of this writing we're limited to 8, and we're
+	 * definitely using 8, hence this check to keep us from messing up in
+	 * the future.
+	 */
+	BTRFS_NESTING_MAX,
+};
+
+static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
+	      "too many lock subclasses defined");
+
 struct btrfs_path;
 
+void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
 void btrfs_tree_lock(struct extent_buffer *eb);
 void btrfs_tree_unlock(struct extent_buffer *eb);
-void __btrfs_tree_read_lock(struct extent_buffer *eb, bool recurse);
+void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
+			    bool recurse);
 void btrfs_tree_read_lock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
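
The comment above the enum warns about the MAX_LOCKDEP_SUBCLASSES limit; as a sketch, adding another nesting tag would look like the following. BTRFS_NESTING_COW is a hypothetical example tag here (only later patches add real additional tags); the static_assert from the patch turns any overflow of lockdep's 8 subclasses into a build failure.

/* Hypothetical future shape of the enum; BTRFS_NESTING_COW is an example
 * value and not part of this patch. */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/* Example: taken while the caller already holds another tree lock. */
	BTRFS_NESTING_COW,

	/* Must stay last: only used to check against lockdep's limit. */
	BTRFS_NESTING_MAX,
};

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");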