bcachefs: Increase commonality between BTREE_ITER_NODES and BTREE_ITER_KEYS
Eventually BTREE_ITER_NODES should be going away. This patch fixes a transaction iterator overflow in the btree node merge path, which happened because BTREE_ITER_NODES iterators couldn't be reused.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 2fa81d0b5b
commit 2d587674ba
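The failure mode is easy to model: a btree_trans keeps a fixed-size table of iterators, and the node merge path takes one per step, so if node iterators can never match an existing slot, every step consumes a fresh one until the table overflows. Below is a minimal userspace sketch of that behavior; TRANS_MAX_ITERS, model_trans, and model_get_iter are made-up names for illustration, not the bcachefs structures.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define TRANS_MAX_ITERS 8                       /* fixed table, like a btree_trans */

struct model_iter  { bool in_use; int btree_id; };
struct model_trans { struct model_iter iters[TRANS_MAX_ITERS]; };

static struct model_iter *model_get_iter(struct model_trans *t, int btree_id,
                                         bool allow_reuse)
{
        /* Reusable iterators: hand back a live slot on the same btree. */
        for (int i = 0; i < TRANS_MAX_ITERS; i++)
                if (allow_reuse && t->iters[i].in_use &&
                    t->iters[i].btree_id == btree_id)
                        return &t->iters[i];

        /* Otherwise take a free slot, if any remain. */
        for (int i = 0; i < TRANS_MAX_ITERS; i++)
                if (!t->iters[i].in_use) {
                        t->iters[i].in_use   = true;
                        t->iters[i].btree_id = btree_id;
                        return &t->iters[i];
                }
        return NULL;                            /* iterator table overflow */
}

int main(void)
{
        struct model_trans a = {0}, b = {0};

        /* A merge-like loop without reuse exhausts the table... */
        for (int step = 0; step < 100; step++)
                if (!model_get_iter(&a, 0, false)) {
                        printf("overflow at step %d without reuse\n", step);
                        break;
                }

        /* ...while with reuse the same loop never grows past one slot. */
        for (int step = 0; step < 100; step++)
                assert(model_get_iter(&b, 0, true));
        printf("no overflow with reuse\n");
        return 0;
}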
fs/bcachefs/btree_iter.c

@@ -432,25 +432,24 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
 }
 
 void __bch2_btree_iter_downgrade(struct btree_iter *iter,
-				 unsigned downgrade_to)
+				 unsigned new_locks_want)
 {
-	unsigned l, new_locks_want = downgrade_to ?:
-		(iter->flags & BTREE_ITER_INTENT ? 1 : 0);
+	unsigned l;
 
-	if (iter->locks_want < downgrade_to) {
-		iter->locks_want = new_locks_want;
-
-		while (iter->nodes_locked &&
-		       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
-			if (l > iter->level) {
-				btree_node_unlock(iter, l);
-			} else {
-				if (btree_node_intent_locked(iter, l)) {
-					six_lock_downgrade(&iter->l[l].b->c.lock);
-					iter->nodes_intent_locked ^= 1 << l;
-				}
-				break;
+	EBUG_ON(iter->locks_want < new_locks_want);
+
+	iter->locks_want = new_locks_want;
+
+	while (iter->nodes_locked &&
+	       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
+		if (l > iter->level) {
+			btree_node_unlock(iter, l);
+		} else {
+			if (btree_node_intent_locked(iter, l)) {
+				six_lock_downgrade(&iter->l[l].b->c.lock);
+				iter->nodes_intent_locked ^= 1 << l;
 			}
+			break;
 		}
 	}
 
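For reference, nodes_locked is a bitmask with one bit per locked btree level: __fls() picks the highest locked level, levels above iter->level are dropped outright, and the level the iterator rests on has its intent lock downgraded to a read lock. A standalone sketch of that walk, with my_fls() standing in for the kernel's __fls():

#include <stdio.h>

/* Stand-in for the kernel's __fls(): index of the highest set bit. */
static unsigned my_fls(unsigned long v)
{
        return 8 * sizeof(v) - 1 - __builtin_clzl(v);
}

int main(void)
{
        unsigned long nodes_locked = 0xe;       /* levels 1..3 locked */
        unsigned level = 1, locks_want = 1, l;

        while (nodes_locked && (l = my_fls(nodes_locked)) >= locks_want) {
                if (l > level) {
                        printf("unlock level %u\n", l); /* above the iterator */
                        nodes_locked &= ~(1UL << l);
                } else {
                        printf("downgrade intent->read at level %u\n", l);
                        break;
                }
        }
        return 0;
}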
@@ -1993,6 +1992,8 @@ static inline void btree_iter_copy(struct btree_iter *dst,
 
 struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 					 enum btree_id btree_id, struct bpos pos,
+					 unsigned locks_want,
+					 unsigned depth,
 					 unsigned flags)
 {
 	struct btree_iter *iter, *best = NULL;
@@ -2005,10 +2006,6 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 	pos.snapshot = btree_type_has_snapshots(btree_id)
 		? U32_MAX : 0;
 
-	/* We always want a fresh iterator for node iterators: */
-	if ((flags & BTREE_ITER_TYPE) == BTREE_ITER_NODES)
-		goto alloc_iter;
-
 	trans_for_each_iter(trans, iter) {
 		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
 			continue;
@@ -2023,7 +2020,7 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 
 		best = iter;
 	}
-alloc_iter:
+
 	if (!best) {
 		iter = btree_trans_iter_alloc(trans);
 		bch2_btree_iter_init(trans, iter, btree_id);
@@ -2047,13 +2044,26 @@ alloc_iter:
 
 	iter->snapshot = pos.snapshot;
 
-	if (!(iter->flags & BTREE_ITER_INTENT))
-		bch2_btree_iter_downgrade(iter);
-	else if (!iter->locks_want) {
-		iter->locks_want = 1;
-		btree_iter_get_locks(iter, true, false);
-	}
+	locks_want = min(locks_want, BTREE_MAX_DEPTH);
+
+	if (locks_want > iter->locks_want) {
+		iter->locks_want = locks_want;
+		btree_iter_get_locks(iter, true, false);
+	} else if (locks_want < iter->locks_want) {
+		__bch2_btree_iter_downgrade(iter, locks_want);
+	}
+
+	while (iter->level < depth) {
+		btree_node_unlock(iter, iter->level);
+		iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
+		iter->level++;
+	}
+
+	while (iter->level > depth)
+		iter->l[--iter->level].b = BTREE_ITER_NO_NODE_INIT;
+
+	iter->min_depth = depth;
 
 	bch2_btree_iter_set_pos(iter, pos);
 	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
 
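The two while loops added above are what let a reused iterator change depth: raising the target depth unlocks and discards the levels below it, while lowering it resets the intermediate level slots so they are re-traversed. A toy model of that adjustment, where level 0 is the leaf as in bcachefs and the node names are illustrative:

#include <stdio.h>

#define MAX_DEPTH 4

int main(void)
{
        const char *l[MAX_DEPTH] = { "leaf", "interior-1", "interior-2", "root" };
        unsigned level = 0, depth = 2;

        /* Going up: unlock and forget each level below the target depth. */
        while (level < depth) {
                printf("unlock level %u (%s)\n", level, l[level]);
                l[level] = "(needs re-traverse)";
                level++;
        }

        /* Going down: mark intermediate levels as needing re-traversal. */
        while (level > depth)
                l[--level] = "(needs re-traverse)";

        printf("iterator now at level %u\n", level);
        return 0;
}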
@@ -2069,21 +2079,16 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
 {
 	struct btree_iter *iter =
 		__bch2_trans_get_iter(trans, btree_id, pos,
-				      BTREE_ITER_NODES|
-				      BTREE_ITER_NOT_EXTENTS|
-				      BTREE_ITER_ALL_SNAPSHOTS|
-				      flags);
-	unsigned i;
+				      locks_want, depth,
+				      BTREE_ITER_NODES|
+				      BTREE_ITER_NOT_EXTENTS|
+				      BTREE_ITER_ALL_SNAPSHOTS|
+				      flags);
 
 	BUG_ON(bkey_cmp(iter->pos, pos));
 
-	iter->locks_want = locks_want;
-	iter->level	 = depth;
-	iter->min_depth	 = depth;
-
-	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
-		iter->l[i].b = NULL;
-	iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
+	BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
+	BUG_ON(iter->level	!= depth);
+	BUG_ON(iter->min_depth	!= depth);
 	iter->ip_allocated = _RET_IP_;
 
 	return iter;
@@ -2325,6 +2330,7 @@ bch2_btree_iter_node_to_text(struct printbuf *out,
 	bch2_bpos_to_text(out, btree_node_pos(_b, type));
 }
 
+#ifdef CONFIG_BCACHEFS_DEBUG
 static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
 {
 	struct btree_iter *iter;

@@ -2335,6 +2341,7 @@ static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
 			return true;
 	return false;
 }
+#endif
 
 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 {
fs/bcachefs/btree_iter.h

@@ -131,8 +131,10 @@ void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);
 
 static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
 {
-	if (iter->locks_want > (iter->flags & BTREE_ITER_INTENT) ? 1 : 0)
-		__bch2_btree_iter_downgrade(iter, 0);
+	unsigned new_locks_want = (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
+
+	if (iter->locks_want > new_locks_want)
+		__bch2_btree_iter_downgrade(iter, new_locks_want);
 }
 
 void bch2_trans_downgrade(struct btree_trans *);
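Besides threading the exact target count through, the rewritten inline sidesteps a C precedence hazard: `>` binds tighter than `?:`, so the old condition parsed as (iter->locks_want > (iter->flags & BTREE_ITER_INTENT)) ? 1 : 0, comparing locks_want against the raw flag value rather than against 1-or-0. A standalone demonstration; FLAG_INTENT and its value are illustrative, not the real BTREE_ITER_INTENT:

#include <stdio.h>

#define FLAG_INTENT (1 << 2)                    /* illustrative flag value */

int main(void)
{
        unsigned locks_want = 2, flags = FLAG_INTENT;

        /* Old spelling: parses as (locks_want > (flags & FLAG_INTENT)) ? 1 : 0 */
        unsigned old_cond = locks_want > (flags & FLAG_INTENT) ? 1 : 0;

        /* Intended: compare locks_want against 1-or-0 */
        unsigned want     = (flags & FLAG_INTENT) ? 1 : 0;
        unsigned new_cond = locks_want > want;

        printf("old: %u, intended: %u\n", old_cond, new_cond); /* old: 0, intended: 1 */
        return 0;
}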
@@ -258,14 +260,17 @@ int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);
 void bch2_trans_unlink_iters(struct btree_trans *);
 
 struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
-					 struct bpos, unsigned);
+					 struct bpos, unsigned,
+					 unsigned, unsigned);
 
 static inline struct btree_iter *
 bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
 		    struct bpos pos, unsigned flags)
 {
 	struct btree_iter *iter =
-		__bch2_trans_get_iter(trans, btree_id, pos, flags);
+		__bch2_trans_get_iter(trans, btree_id, pos,
+				      (flags & BTREE_ITER_INTENT) != 0, 0,
+				      flags);
 	iter->ip_allocated = _THIS_IP_;
 	return iter;
 }
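With that, both wrappers go through the same path: bch2_trans_get_iter requests depth 0 (the leaf) and locks_want = (flags & BTREE_ITER_INTENT) != 0, relying on C comparison operators yielding exactly 0 or 1. A trivial standalone check, again with an illustrative flag value:

#include <stdio.h>

#define FLAG_INTENT (1 << 2)                    /* illustrative */

int main(void)
{
        unsigned flags = FLAG_INTENT | (1 << 5);
        unsigned locks_want = (flags & FLAG_INTENT) != 0;

        printf("locks_want = %u\n", locks_want); /* 1, not the raw flag value 4 */
        return 0;
}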