bcachefs: Better use of locking helpers
Held btree locks are tracked in btree_path->nodes_locked and
btree_path->nodes_intent_locked. Upcoming patches are going to change the
representation in struct btree_path, so this patch switches to proper helpers
instead of direct access to these fields.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
commit d4263e5638 (parent 8e5696698d)
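The commit message above describes two per-path bitmasks, nodes_locked and nodes_intent_locked, and the helpers that now wrap them. Below is a minimal standalone sketch of the semantics those helpers encode, written in plain C with made-up stand-in names (struct path, NODE_READ_LOCKED, mark_node_locked_noreset, ...) rather than the real bcachefs types; it illustrates the idea only and is not code from this patch:

#include <stdbool.h>
#include <stdio.h>

enum node_locked_type {
	NODE_UNLOCKED		= -1,
	NODE_READ_LOCKED	= 0,	/* mirrors SIX_LOCK_read == 0 */
	NODE_INTENT_LOCKED	= 1,	/* mirrors SIX_LOCK_intent == 1 */
};

struct path {
	unsigned nodes_locked;		/* bit per level with any lock held */
	unsigned nodes_intent_locked;	/* bit per level whose lock is an intent lock */
};

static enum node_locked_type node_locked_type(const struct path *p, unsigned level)
{
	return p->nodes_locked & (1U << level)
		? (enum node_locked_type) ((p->nodes_intent_locked >> level) & 1)
		: NODE_UNLOCKED;
}

static bool node_locked(const struct path *p, unsigned level)
{
	return node_locked_type(p, level) != NODE_UNLOCKED;
}

static bool node_read_locked(const struct path *p, unsigned level)
{
	return node_locked_type(p, level) == NODE_READ_LOCKED;
}

static void mark_node_locked_noreset(struct path *p, unsigned level,
				     enum node_locked_type type)
{
	/* clear both bits for this level, then set them if a lock is held */
	p->nodes_locked		&= ~(1U << level);
	p->nodes_intent_locked	&= ~(1U << level);

	if (type != NODE_UNLOCKED) {
		p->nodes_locked		|= 1U << level;
		p->nodes_intent_locked	|= (unsigned) type << level;
	}
}

int main(void)
{
	struct path p = { 0, 0 };

	mark_node_locked_noreset(&p, 0, NODE_READ_LOCKED);	/* read lock at level 0 */
	mark_node_locked_noreset(&p, 1, NODE_INTENT_LOCKED);	/* intent lock at level 1 */

	printf("l=0: locked=%d read=%d\n", node_locked(&p, 0), node_read_locked(&p, 0));
	printf("l=1: locked=%d read=%d\n", node_locked(&p, 1), node_read_locked(&p, 1));

	mark_node_locked_noreset(&p, 0, NODE_UNLOCKED);		/* unlocking == marking UNLOCKED */
	printf("l=0 after unlock: locked=%d\n", node_locked(&p, 0));
	return 0;
}

The point of routing every update through one noreset-style helper is that the bit layout can later be changed in a single place, which is what the commit message says upcoming patches will do.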
@@ -348,7 +348,7 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
 		if (cmp < 0)
 			continue;
 
-		if (!(path->nodes_locked & 1) ||
+		if (!btree_node_locked(path, 0) ||
 		    !path->should_be_locked)
 			continue;
 
@@ -3053,8 +3053,8 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
 			if (btree_node_locked(path, l) &&
 			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
-				prt_printf(out, " %s l=%u ",
-				       btree_node_intent_locked(path, l) ? "i" : "r", l);
+				prt_printf(out, " %c l=%u ",
+				       lock_types[btree_node_locked_type(path, l)], l);
 				bch2_btree_path_node_to_text(out, b, path->cached);
 				prt_printf(out, "\n");
 			}
@@ -61,6 +61,16 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
 	six_lock_readers_add(&b->c.lock, readers);
 }
 
+static inline bool path_has_read_locks(struct btree_path *path)
+{
+	unsigned l;
+
+	for (l = 0; l < BTREE_MAX_DEPTH; l++)
+		if (btree_node_read_locked(path, l))
+			return true;
+	return false;
+}
+
 /* Slowpath: */
 int __bch2_btree_node_lock(struct btree_trans *trans,
 			   struct btree_path *path,
@@ -91,7 +101,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
 		 * already have read locked - deadlock:
 		 */
 		if (type == SIX_LOCK_intent &&
-		    linked->nodes_locked != linked->nodes_intent_locked) {
+		    path_has_read_locks(linked)) {
 			reason = 1;
 			goto deadlock;
 		}
@@ -121,7 +131,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
 		 * another path has possible descendants locked of the node
 		 * we're about to lock, it must have the ancestors locked too:
 		 */
-		if (level > __fls(linked->nodes_locked)) {
+		if (level > btree_path_highest_level_locked(linked)) {
 			reason = 5;
 			goto deadlock;
 		}
@@ -254,7 +264,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 	trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
 	return false;
 success:
-	mark_btree_node_intent_locked(trans, path, level);
+	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
 	return true;
 }
 
@@ -370,13 +380,13 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 	path->locks_want = new_locks_want;
 
 	while (path->nodes_locked &&
-	       (l = __fls(path->nodes_locked)) >= path->locks_want) {
+	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
 		if (l > path->level) {
 			btree_node_unlock(trans, path, l);
 		} else {
 			if (btree_node_intent_locked(path, l)) {
 				six_lock_downgrade(&path->l[l].b->c.lock);
-				path->nodes_intent_locked ^= 1 << l;
+				mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
 			}
 			break;
 		}
@@ -61,27 +61,30 @@ static inline bool btree_node_read_locked(struct btree_path *path,
 
 static inline bool btree_node_locked(struct btree_path *path, unsigned level)
 {
-	return path->nodes_locked & (1 << level);
+	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
 }
 
-static inline void mark_btree_node_unlocked(struct btree_path *path,
-					    unsigned level)
-{
-	path->nodes_locked &= ~(1 << level);
-	path->nodes_intent_locked &= ~(1 << level);
-}
-
-static inline void mark_btree_node_locked_noreset(struct btree_trans *trans,
-						  struct btree_path *path,
+static inline void mark_btree_node_locked_noreset(struct btree_path *path,
 						  unsigned level,
-						  enum six_lock_type type)
+						  enum btree_node_locked_type type)
 {
 	/* relying on this to avoid a branch */
 	BUILD_BUG_ON(SIX_LOCK_read != 0);
 	BUILD_BUG_ON(SIX_LOCK_intent != 1);
 
-	path->nodes_locked |= 1 << level;
-	path->nodes_intent_locked |= type << level;
+	path->nodes_locked &= ~(1 << level);
+	path->nodes_intent_locked &= ~(1 << level);
+
+	if (type != BTREE_NODE_UNLOCKED) {
+		path->nodes_locked |= 1 << level;
+		path->nodes_intent_locked |= type << level;
+	}
+}
+
+static inline void mark_btree_node_unlocked(struct btree_path *path,
+					    unsigned level)
+{
+	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
 }
 
 static inline void mark_btree_node_locked(struct btree_trans *trans,
@@ -89,19 +92,12 @@ static inline void mark_btree_node_locked(struct btree_trans *trans,
 					  unsigned level,
 					  enum six_lock_type type)
 {
-	mark_btree_node_locked_noreset(trans, path, level, type);
+	mark_btree_node_locked_noreset(path, level, type);
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 	path->l[level].lock_taken_time = ktime_get_ns();
 #endif
 }
 
-static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
-						 struct btree_path *path,
-						 unsigned level)
-{
-	mark_btree_node_locked_noreset(trans, path, level, SIX_LOCK_intent);
-}
-
 static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
 {
 	return level < path->locks_want
@@ -164,13 +160,23 @@ static inline void btree_node_unlock(struct btree_trans *trans,
 	mark_btree_node_unlocked(path, level);
 }
 
+static inline int btree_path_lowest_level_locked(struct btree_path *path)
+{
+	return __ffs(path->nodes_locked);
+}
+
+static inline int btree_path_highest_level_locked(struct btree_path *path)
+{
+	return __fls(path->nodes_locked);
+}
+
 static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
 					    struct btree_path *path)
 {
 	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
 
 	while (path->nodes_locked)
-		btree_node_unlock(trans, path, __ffs(path->nodes_locked));
+		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
 }
 
 /*
@@ -747,11 +747,12 @@ static inline void path_upgrade_readers(struct btree_trans *trans, struct btree_
 static inline void upgrade_readers(struct btree_trans *trans, struct btree_path *path)
 {
 	struct btree *b = path_l(path)->b;
+	unsigned l;
 
 	do {
-		if (path->nodes_locked &&
-		    path->nodes_locked != path->nodes_intent_locked)
-			path_upgrade_readers(trans, path);
+		for (l = 0; l < BTREE_MAX_DEPTH; l++)
+			if (btree_node_read_locked(path, l))
+				path_upgrade_readers(trans, path);
 	} while ((path = prev_btree_path(trans, path)) &&
 		 path_l(path)->b == b);
 }
@@ -770,11 +771,13 @@ static inline void normalize_read_intent_locks(struct btree_trans *trans)
 			? trans->paths + trans->sorted[i + 1]
 			: NULL;
 
-		if (path->nodes_locked) {
-			if (path->nodes_intent_locked)
-				nr_intent++;
-			else
-				nr_read++;
+		switch (btree_node_locked_type(path, path->level)) {
+		case BTREE_NODE_READ_LOCKED:
+			nr_read++;
+			break;
+		case BTREE_NODE_INTENT_LOCKED:
+			nr_intent++;
+			break;
 		}
 
 		if (!next || path_l(path)->b != path_l(next)->b) {
@@ -797,7 +800,7 @@ static inline bool have_conflicting_read_lock(struct btree_trans *trans, struct
 		//if (path == pos)
 		//	break;
 
-		if (path->nodes_locked != path->nodes_intent_locked &&
+		if (btree_node_read_locked(path, path->level) &&
 		    !bch2_btree_path_upgrade(trans, path, path->level + 1))
 			return true;
 	}