mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-15 08:14:15 +08:00
bcachefs: New btree helpers
This introduces some new conveniences, to help cut down on boilerplate: - bch2_trans_kmalloc_nomemzero() - performance optimization - bch2_bkey_make_mut() - bch2_bkey_get_mut() - bch2_bkey_get_mut_typed() - bch2_bkey_alloc() Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
a16b19cd1d
commit
994ba47543
@ -491,7 +491,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
|
||||
a->data_type != BCH_DATA_need_discard)
|
||||
return 0;
|
||||
|
||||
k = bch2_trans_kmalloc(trans, sizeof(*k));
|
||||
k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
|
||||
if (IS_ERR(k))
|
||||
return PTR_ERR(k);
|
||||
|
||||
|
@ -1578,15 +1578,12 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
|
||||
" should be %u",
|
||||
(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
|
||||
r->refcount)) {
|
||||
struct bkey_i *new;
|
||||
struct bkey_i *new = bch2_bkey_make_mut(trans, k);
|
||||
|
||||
new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
ret = PTR_ERR_OR_ZERO(new);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_reassemble(new, k);
|
||||
|
||||
if (!r->refcount)
|
||||
new->k.type = KEY_TYPE_deleted;
|
||||
else
|
||||
@ -1903,13 +1900,11 @@ static int gc_btree_gens_key(struct btree_trans *trans,
|
||||
percpu_up_read(&c->mark_lock);
|
||||
return 0;
|
||||
update:
|
||||
u = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
u = bch2_bkey_make_mut(trans, k);
|
||||
ret = PTR_ERR_OR_ZERO(u);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_reassemble(u, k);
|
||||
|
||||
bch2_extent_normalize(c, bkey_i_to_s(u));
|
||||
return bch2_trans_update(trans, iter, u, 0);
|
||||
}
|
||||
|
@ -404,19 +404,76 @@ void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
|
||||
|
||||
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
|
||||
{
|
||||
unsigned new_top = trans->mem_top + size;
|
||||
void *p = trans->mem + trans->mem_top;
|
||||
size = roundup(size, 8);
|
||||
|
||||
if (likely(trans->mem_top + size <= trans->mem_bytes)) {
|
||||
void *p = trans->mem + trans->mem_top;
|
||||
|
||||
if (likely(new_top <= trans->mem_bytes)) {
|
||||
trans->mem_top += size;
|
||||
memset(p, 0, size);
|
||||
return p;
|
||||
} else {
|
||||
return __bch2_trans_kmalloc(trans, size);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
|
||||
{
|
||||
size = roundup(size, 8);
|
||||
|
||||
if (likely(trans->mem_top + size <= trans->mem_bytes)) {
|
||||
void *p = trans->mem + trans->mem_top;
|
||||
|
||||
trans->mem_top += size;
|
||||
return p;
|
||||
} else {
|
||||
return __bch2_trans_kmalloc(trans, size);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Return a mutable copy of @k, allocated from the transaction's memory
 * pool (not zeroed — bkey_reassemble() overwrites the whole key).
 * Returns an ERR_PTR on allocation failure.
 */
static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i *copy = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));

	if (IS_ERR(copy))
		return copy;

	bkey_reassemble(copy, k);
	return copy;
}
|
||||
|
||||
static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
|
||||
struct btree_iter *iter)
|
||||
{
|
||||
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
|
||||
|
||||
return unlikely(IS_ERR(k.k))
|
||||
? ERR_CAST(k.k)
|
||||
: bch2_bkey_make_mut(trans, k);
|
||||
}
|
||||
|
||||
/*
 * Typed variant of bch2_bkey_get_mut(): yields a struct bkey_i_<type> *,
 * or ERR_PTR(-ENOENT) when the key at the iterator's position is not of
 * the expected type.
 */
#define bch2_bkey_get_mut_typed(_trans, _iter, _type)			\
({									\
	struct bkey_i *_mut = bch2_bkey_get_mut(_trans, _iter);		\
	struct bkey_i_##_type *_res;					\
									\
	if (IS_ERR(_mut))						\
		_res = ERR_CAST(_mut);					\
	else if (unlikely(_mut->k.type != KEY_TYPE_##_type))		\
		_res = ERR_PTR(-ENOENT);				\
	else								\
		_res = bkey_i_to_##_type(_mut);				\
	_res;								\
})
|
||||
|
||||
/*
 * Allocate a fresh key of the given type from the transaction's memory
 * pool, initialized via bkey_<type>_init() and positioned at @_iter's
 * current position.  May return an ERR_PTR on allocation failure.
 */
#define bch2_bkey_alloc(_trans, _iter, _type)				\
({									\
	struct bkey_i_##_type *_new =					\
		bch2_trans_kmalloc_nomemzero(_trans, sizeof(*_new));	\
									\
	if (!IS_ERR(_new)) {						\
		bkey_##_type##_init(&_new->k_i);			\
		_new->k.p = (_iter)->pos;				\
	}								\
	_new;								\
})
|
||||
|
||||
u32 bch2_trans_begin(struct btree_trans *);
|
||||
|
||||
static inline struct btree *
|
||||
|
@ -1196,13 +1196,11 @@ static noinline int extent_front_merge(struct btree_trans *trans,
|
||||
struct bkey_i *update;
|
||||
int ret;
|
||||
|
||||
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
update = bch2_bkey_make_mut(trans, k);
|
||||
ret = PTR_ERR_OR_ZERO(update);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_reassemble(update, k);
|
||||
|
||||
if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
|
||||
return 0;
|
||||
|
||||
@ -1287,12 +1285,10 @@ int bch2_trans_update_extent(struct btree_trans *trans,
|
||||
trans->extra_journal_res += compressed_sectors;
|
||||
|
||||
if (front_split) {
|
||||
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
update = bch2_bkey_make_mut(trans, k);
|
||||
if ((ret = PTR_ERR_OR_ZERO(update)))
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(update, k);
|
||||
|
||||
bch2_cut_back(start, update);
|
||||
|
||||
bch2_trans_iter_init(trans, &update_iter, btree_id, update->k.p,
|
||||
@ -1311,12 +1307,10 @@ int bch2_trans_update_extent(struct btree_trans *trans,
|
||||
|
||||
if (k.k->p.snapshot != insert->k.p.snapshot &&
|
||||
(front_split || back_split)) {
|
||||
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
update = bch2_bkey_make_mut(trans, k);
|
||||
if ((ret = PTR_ERR_OR_ZERO(update)))
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(update, k);
|
||||
|
||||
bch2_cut_front(start, update);
|
||||
bch2_cut_back(insert->k.p, update);
|
||||
|
||||
@ -1360,11 +1354,10 @@ int bch2_trans_update_extent(struct btree_trans *trans,
|
||||
}
|
||||
|
||||
if (back_split) {
|
||||
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
update = bch2_bkey_make_mut(trans, k);
|
||||
if ((ret = PTR_ERR_OR_ZERO(update)))
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(update, k);
|
||||
bch2_cut_front(insert->k.p, update);
|
||||
|
||||
ret = bch2_trans_update_by_path(trans, iter.path, update,
|
||||
|
@ -1400,7 +1400,6 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
|
||||
s64 sectors, enum bch_data_type data_type)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
struct bkey_i_stripe *s;
|
||||
struct bch_replicas_padded r;
|
||||
int ret = 0;
|
||||
@ -1408,20 +1407,16 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
|
||||
bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
|
||||
BTREE_ITER_INTENT|
|
||||
BTREE_ITER_WITH_UPDATES);
|
||||
k = bch2_btree_iter_peek_slot(&iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (k.k->type != KEY_TYPE_stripe) {
|
||||
bch2_trans_inconsistent(trans,
|
||||
s = bch2_bkey_get_mut_typed(trans, &iter, stripe);
|
||||
ret = PTR_ERR_OR_ZERO(s);
|
||||
if (unlikely(ret)) {
|
||||
bch2_trans_inconsistent_on(ret == -ENOENT, trans,
|
||||
"pointer to nonexistent stripe %llu",
|
||||
(u64) p.ec.idx);
|
||||
ret = -EIO;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
|
||||
if (!bch2_ptr_matches_stripe(&s->v, p)) {
|
||||
bch2_trans_inconsistent(trans,
|
||||
"stripe pointer doesn't match stripe %llu",
|
||||
(u64) p.ec.idx);
|
||||
@ -1429,12 +1424,6 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
|
||||
goto err;
|
||||
}
|
||||
|
||||
s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
ret = PTR_ERR_OR_ZERO(s);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(&s->k_i, k);
|
||||
stripe_blockcount_set(&s->v, p.ec.block,
|
||||
stripe_blockcount_get(&s->v, p.ec.block) +
|
||||
sectors);
|
||||
@ -1710,8 +1699,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
struct bkey_i *n;
|
||||
struct bkey_i *k;
|
||||
__le64 *refcount;
|
||||
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
@ -1720,19 +1708,12 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
|
||||
bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
|
||||
BTREE_ITER_INTENT|
|
||||
BTREE_ITER_WITH_UPDATES);
|
||||
k = bch2_btree_iter_peek_slot(&iter);
|
||||
ret = bkey_err(k);
|
||||
k = bch2_bkey_get_mut(trans, &iter);
|
||||
ret = PTR_ERR_OR_ZERO(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(n, k);
|
||||
|
||||
refcount = bkey_refcount(n);
|
||||
refcount = bkey_refcount(k);
|
||||
if (!refcount) {
|
||||
bch2_bkey_val_to_text(&buf, c, p.s_c);
|
||||
bch2_trans_inconsistent(trans,
|
||||
@ -1756,12 +1737,12 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
|
||||
u64 pad;
|
||||
|
||||
pad = max_t(s64, le32_to_cpu(v->front_pad),
|
||||
le64_to_cpu(v->idx) - bkey_start_offset(k.k));
|
||||
le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
|
||||
BUG_ON(pad > U32_MAX);
|
||||
v->front_pad = cpu_to_le32(pad);
|
||||
|
||||
pad = max_t(s64, le32_to_cpu(v->back_pad),
|
||||
k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
|
||||
k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
|
||||
BUG_ON(pad > U32_MAX);
|
||||
v->back_pad = cpu_to_le32(pad);
|
||||
}
|
||||
@ -1769,11 +1750,11 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
|
||||
le64_add_cpu(refcount, add);
|
||||
|
||||
bch2_btree_iter_set_pos_to_extent_start(&iter);
|
||||
ret = bch2_trans_update(trans, &iter, n, 0);
|
||||
ret = bch2_trans_update(trans, &iter, k, 0);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
*idx = k.k->p.offset;
|
||||
*idx = k->k.p.offset;
|
||||
err:
|
||||
bch2_trans_iter_exit(trans, &iter);
|
||||
printbuf_exit(&buf);
|
||||
|
@ -844,13 +844,11 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
|
||||
|
||||
dev = s->key.v.ptrs[block].dev;
|
||||
|
||||
n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
n = bch2_bkey_make_mut(trans, k);
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_reassemble(n, k);
|
||||
|
||||
bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
|
||||
ec_ptr = (void *) bch2_bkey_has_device(bkey_i_to_s_c(n), dev);
|
||||
BUG_ON(!ec_ptr);
|
||||
|
@ -779,12 +779,10 @@ static int hash_redo_key(struct btree_trans *trans,
|
||||
if (IS_ERR(delete))
|
||||
return PTR_ERR(delete);
|
||||
|
||||
tmp = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
tmp = bch2_bkey_make_mut(trans, k);
|
||||
if (IS_ERR(tmp))
|
||||
return PTR_ERR(tmp);
|
||||
|
||||
bkey_reassemble(tmp, k);
|
||||
|
||||
bkey_init(&delete->k);
|
||||
delete->k.p = k_iter->pos;
|
||||
return bch2_btree_iter_traverse(k_iter) ?:
|
||||
|
@ -101,14 +101,12 @@ int bch2_lru_set(struct btree_trans *trans, u64 lru_id, u64 idx, u64 *time)
|
||||
BUG_ON(iter.pos.inode != lru_id);
|
||||
*time = iter.pos.offset;
|
||||
|
||||
lru = bch2_trans_kmalloc(trans, sizeof(*lru));
|
||||
lru = bch2_bkey_alloc(trans, &iter, lru);
|
||||
ret = PTR_ERR_OR_ZERO(lru);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_lru_init(&lru->k_i);
|
||||
lru->k.p = iter.pos;
|
||||
lru->v.idx = cpu_to_le64(idx);
|
||||
lru->v.idx = cpu_to_le64(idx);
|
||||
|
||||
ret = bch2_trans_update(trans, &iter, &lru->k_i, 0);
|
||||
if (ret)
|
||||
@ -164,17 +162,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
|
||||
" for %s",
|
||||
(bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
|
||||
(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
|
||||
struct bkey_i *update =
|
||||
bch2_trans_kmalloc(trans, sizeof(*update));
|
||||
|
||||
ret = PTR_ERR_OR_ZERO(update);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_init(&update->k);
|
||||
update->k.p = lru_iter->pos;
|
||||
|
||||
ret = bch2_trans_update(trans, lru_iter, update, 0);
|
||||
ret = bch2_btree_delete_at(trans, lru_iter, 0);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
@ -49,13 +49,11 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
|
||||
if (!bch2_bkey_has_device(k, dev_idx))
|
||||
return 0;
|
||||
|
||||
n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
n = bch2_bkey_make_mut(trans, k);
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_reassemble(n, k);
|
||||
|
||||
ret = drop_dev_ptrs(c, bkey_i_to_s(n), dev_idx, flags, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -192,13 +192,11 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans,
|
||||
struct bkey_i *n;
|
||||
int ret;
|
||||
|
||||
n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
n = bch2_bkey_make_mut(trans, k);
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_reassemble(n, k);
|
||||
|
||||
while (data_opts.kill_ptrs) {
|
||||
unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
|
||||
struct bch_extent_ptr *ptr;
|
||||
|
@ -377,33 +377,22 @@ int bch2_fs_snapshots_start(struct bch_fs *c)
|
||||
static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
struct bkey_i_snapshot *s;
|
||||
int ret = 0;
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
|
||||
BTREE_ITER_INTENT);
|
||||
k = bch2_btree_iter_peek_slot(&iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (k.k->type != KEY_TYPE_snapshot) {
|
||||
bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
|
||||
ret = -ENOENT;
|
||||
s = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
|
||||
ret = PTR_ERR_OR_ZERO(s);
|
||||
if (unlikely(ret)) {
|
||||
bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", id);
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* already deleted? */
|
||||
if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
|
||||
if (BCH_SNAPSHOT_DELETED(&s->v))
|
||||
goto err;
|
||||
|
||||
s = bch2_trans_kmalloc(trans, sizeof(*s));
|
||||
ret = PTR_ERR_OR_ZERO(s);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(&s->k_i, k);
|
||||
SET_BCH_SNAPSHOT_DELETED(&s->v, true);
|
||||
SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
|
||||
s->v.subvol = 0;
|
||||
@ -421,7 +410,6 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
|
||||
struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
|
||||
struct bkey_s_c k;
|
||||
struct bkey_s_c_snapshot s;
|
||||
struct bkey_i_snapshot *parent;
|
||||
u32 parent_id;
|
||||
unsigned i;
|
||||
int ret = 0;
|
||||
@ -445,27 +433,18 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
|
||||
parent_id = le32_to_cpu(s.v->parent);
|
||||
|
||||
if (parent_id) {
|
||||
struct bkey_i_snapshot *parent;
|
||||
|
||||
bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
|
||||
POS(0, parent_id),
|
||||
BTREE_ITER_INTENT);
|
||||
k = bch2_btree_iter_peek_slot(&p_iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (k.k->type != KEY_TYPE_snapshot) {
|
||||
bch2_fs_inconsistent(trans->c, "missing snapshot %u", parent_id);
|
||||
ret = -ENOENT;
|
||||
parent = bch2_bkey_get_mut_typed(trans, &p_iter, snapshot);
|
||||
ret = PTR_ERR_OR_ZERO(parent);
|
||||
if (unlikely(ret)) {
|
||||
bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", parent_id);
|
||||
goto err;
|
||||
}
|
||||
|
||||
parent = bch2_trans_kmalloc(trans, sizeof(*parent));
|
||||
ret = PTR_ERR_OR_ZERO(parent);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(&parent->k_i, k);
|
||||
|
||||
for (i = 0; i < 2; i++)
|
||||
if (le32_to_cpu(parent->v.children[i]) == id)
|
||||
break;
|
||||
@ -522,13 +501,11 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
|
||||
goto err;
|
||||
}
|
||||
|
||||
n = bch2_trans_kmalloc(trans, sizeof(*n));
|
||||
n = bch2_bkey_alloc(trans, &iter, snapshot);
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_snapshot_init(&n->k_i);
|
||||
n->k.p = iter.pos;
|
||||
n->v.flags = 0;
|
||||
n->v.parent = cpu_to_le32(parent);
|
||||
n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
|
||||
@ -545,24 +522,14 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
|
||||
|
||||
if (parent) {
|
||||
bch2_btree_iter_set_pos(&iter, POS(0, parent));
|
||||
k = bch2_btree_iter_peek(&iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (k.k->type != KEY_TYPE_snapshot) {
|
||||
bch_err(trans->c, "snapshot %u not found", parent);
|
||||
ret = -ENOENT;
|
||||
n = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (unlikely(ret)) {
|
||||
if (ret == -ENOENT)
|
||||
bch_err(trans->c, "snapshot %u not found", parent);
|
||||
goto err;
|
||||
}
|
||||
|
||||
n = bch2_trans_kmalloc(trans, sizeof(*n));
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(&n->k_i, k);
|
||||
|
||||
if (n->v.children[0] || n->v.children[1]) {
|
||||
bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
|
||||
ret = -EINVAL;
|
||||
@ -967,7 +934,6 @@ int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
|
||||
int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
struct bkey_i_subvolume *n;
|
||||
struct subvolume_unlink_hook *h;
|
||||
int ret = 0;
|
||||
@ -976,23 +942,13 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
|
||||
POS(0, subvolid),
|
||||
BTREE_ITER_CACHED|
|
||||
BTREE_ITER_INTENT);
|
||||
k = bch2_btree_iter_peek_slot(&iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (k.k->type != KEY_TYPE_subvolume) {
|
||||
bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
|
||||
ret = -EIO;
|
||||
n = bch2_bkey_get_mut_typed(trans, &iter, subvolume);
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (unlikely(ret)) {
|
||||
bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing subvolume %u", subvolid);
|
||||
goto err;
|
||||
}
|
||||
|
||||
n = bch2_trans_kmalloc(trans, sizeof(*n));
|
||||
ret = PTR_ERR_OR_ZERO(n);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_reassemble(&n->k_i, k);
|
||||
SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
|
||||
|
||||
ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
|
||||
@ -1049,27 +1005,19 @@ found_slot:
|
||||
|
||||
if (src_subvolid) {
|
||||
/* Creating a snapshot: */
|
||||
src_subvol = bch2_trans_kmalloc(trans, sizeof(*src_subvol));
|
||||
ret = PTR_ERR_OR_ZERO(src_subvol);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
|
||||
POS(0, src_subvolid),
|
||||
BTREE_ITER_CACHED|
|
||||
BTREE_ITER_INTENT);
|
||||
k = bch2_btree_iter_peek_slot(&src_iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (k.k->type != KEY_TYPE_subvolume) {
|
||||
bch_err(c, "subvolume %u not found", src_subvolid);
|
||||
ret = -ENOENT;
|
||||
src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter, subvolume);
|
||||
ret = PTR_ERR_OR_ZERO(src_subvol);
|
||||
if (unlikely(ret)) {
|
||||
bch2_fs_inconsistent_on(ret == -ENOENT, trans->c,
|
||||
"subvolume %u not found", src_subvolid);
|
||||
goto err;
|
||||
}
|
||||
|
||||
bkey_reassemble(&src_subvol->k_i, k);
|
||||
parent = le32_to_cpu(src_subvol->v.snapshot);
|
||||
}
|
||||
|
||||
@ -1086,18 +1034,16 @@ found_slot:
|
||||
goto err;
|
||||
}
|
||||
|
||||
new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
|
||||
new_subvol = bch2_bkey_alloc(trans, &dst_iter, subvolume);
|
||||
ret = PTR_ERR_OR_ZERO(new_subvol);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bkey_subvolume_init(&new_subvol->k_i);
|
||||
new_subvol->v.flags = 0;
|
||||
new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
|
||||
new_subvol->v.inode = cpu_to_le64(inode);
|
||||
SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
|
||||
SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
|
||||
new_subvol->k.p = dst_iter.pos;
|
||||
ret = bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
Loading…
Reference in New Issue
Block a user