Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-13 14:24:11 +08:00)
bcachefs fixes for 6.9-rc3

Lots of fixes for situations with extreme filesystem damage. One fix
("Fix journal pins in btree write buffer") is applicable to normal usage;
there is also a dio performance fix.

New repair/construction code is in the final stages and should be ready in
about a week. Anyone that lost btree interior nodes (or a variety of other
damage) as a result of the splitbrain bug will be able to repair them.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEKnAFLkS8Qha+jvQrE6szbY3KbnYFAmYLMPsACgkQE6szbY3K
bnYVvxAAhNgoYTsjPbA8sjCtLIsEflz76BvNT7CAVB9QaF0Em/UJvKpIJ30JkNTj
j7N8XxvRJmreSKbKGeWHRcAejHvu7bky+SCHKDHyYHxmPLlcEkwSuXcR0fYnMAQ8
Ne4ELpL0jmWOS1QHds8v8O0SP+SgYEe8E1Pryz88kLL1eWJz348RWQkg6DtsVAyO
DySr8NRntZQyRo5C9H6iEcnLdG2snhKy+AOVDIySn9P5mLuaPRSANPNNT+Kss79p
z62ZwB7So6SE23LPAUQ4HaJoGtaJlB/gxNd8J8ma3JybbEcz4PmcyVIfN3A62FVi
gOUzd1pi8/NjOvtzojghvJ1+8zxD4kmZnoX5qu+Jx3rIICplQ6u9rYUiwTQRxYbw
QDeJkwmBdQFosl6iG+ji26ui0yZO1GNQpu2XCCv7JSVLddgNZLRb1v+b7uQzuYLA
7gQTYYXF+1g/WK3se3NlFVsPV+keqPFX2pYX1ySptLLr3QD5SX6d2SJIkNb4oV6c
T+1YA7BjGIzgSy4ZE/Q1jVQCKnIYYsW5bL9mvh/q2SSUfMc3uSUMRM4zsRCW6djB
SQKehKVAZBGUNgB5WOFslEUKwUPnGGfO1YAXyqumf1tkSs59CI5NLZfTQFaDFOND
2iS9HmxE4zdOckaM0eBkhAN349YJSaVZwD3C4Nb+qHjzT50ly7s=
=OLvh
-----END PGP SIGNATURE-----

Merge tag 'bcachefs-2024-04-01' of https://evilpiepirate.org/git/bcachefs

Pull bcachefs fixes from Kent Overstreet:
 "Lots of fixes for situations with extreme filesystem damage.

  One fix ("Fix journal pins in btree write buffer") is applicable to
  normal usage; there is also a dio performance fix.

  New repair/construction code is in the final stages and should be
  ready in about a week. Anyone that lost btree interior nodes (or a
  variety of other damage) as a result of the splitbrain bug will be
  able to repair them"

* tag 'bcachefs-2024-04-01' of https://evilpiepirate.org/git/bcachefs: (32 commits)
  bcachefs: On emergency shutdown, print out current journal sequence number
  bcachefs: Fix overlapping extent repair
  bcachefs: Fix remove_dirent()
  bcachefs: Logged op errors should be ignored
  bcachefs: Improve -o norecovery; opts.recovery_pass_limit
  bcachefs: bch2_run_explicit_recovery_pass_persistent()
  bcachefs: Ensure bch_sb_field_ext always exists
  bcachefs: Flush journal immediately after replay if we did early repair
  bcachefs: Resume logged ops after fsck
  bcachefs: Add error messages to logged ops fns
  bcachefs: Split out recovery_passes.c
  bcachefs: fix backpointer for missing alloc key msg
  bcachefs: Fix bch2_btree_increase_depth()
  bcachefs: Kill bch2_bkey_ptr_data_type()
  bcachefs: Fix use after free in check_root_trans()
  bcachefs: Fix repair path for missing indirect extents
  bcachefs: Fix use after free in bch2_check_fix_ptrs()
  bcachefs: Fix btree node keys accounting in topology repair path
  bcachefs: Check btree ptr min_key in .invalid
  bcachefs: add REQ_SYNC and REQ_IDLE in write dio
  ...
This commit is contained in: commit 67199a47dd
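The "dio performance fix" called out above boils down to tagging dio write bios
with REQ_SYNC | REQ_IDLE, as the hunks further below show. A minimal sketch of
the idea, not taken from the patch (bio_reset() signature as in Linux 5.18+):

/* Hedged sketch: tag a dio write bio the way the generic O_DIRECT path does. */
#include <linux/bio.h>

static void dio_write_bio_reinit(struct bio *bio)
{
	/*
	 * REQ_SYNC: a task is waiting on completion of this write.
	 * REQ_IDLE: no further I/O is expected from this context soon, so
	 * writeback throttling need not treat it as background writeback.
	 */
	bio_reset(bio, NULL, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
}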
@@ -67,6 +67,7 @@ bcachefs-y := \
quota.o \
rebalance.o \
recovery.o \
recovery_passes.o \
reflink.o \
replicas.o \
sb-clean.o \
@@ -29,8 +29,7 @@ static bool extent_matches_bp(struct bch_fs *c,
if (p.ptr.cached)
continue;
bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
&bucket2, &bp2);
bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bucket2, &bp2);
if (bpos_eq(bucket, bucket2) &&
!memcmp(&bp, &bp2, sizeof(bp)))
return true;

@@ -44,6 +43,11 @@ int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
struct printbuf *err)
{
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
/* these will be caught by fsck */
if (!bch2_dev_exists2(c, bp.k->p.inode))
return 0;
struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
int ret = 0;

@@ -378,7 +382,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
backpointer_to_missing_alloc,
"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
alloc_iter.pos.inode, alloc_iter.pos.offset,
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, bp_iter, 0);
goto out;
}

@@ -502,8 +506,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
if (p.ptr.cached)
continue;
bch2_extent_ptr_to_bp(c, btree, level,
k, p, &bucket_pos, &bp);
bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bucket_pos, &bp);
ret = check_bp_exists(trans, s, bucket_pos, bp, k);
if (ret)
@@ -90,20 +90,40 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}
static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p)
static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
struct extent_ptr_decoded p,
const union bch_extent_entry *entry)
{
return level ? BCH_DATA_btree :
p.has_ec ? BCH_DATA_stripe :
BCH_DATA_user;
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
case KEY_TYPE_btree_ptr_v2:
return BCH_DATA_btree;
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v:
return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
case KEY_TYPE_stripe: {
const struct bch_extent_ptr *ptr = &entry->ptr;
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
BUG_ON(ptr < s.v->ptrs ||
ptr >= s.v->ptrs + s.v->nr_blocks);
return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
? BCH_DATA_parity
: BCH_DATA_user;
}
default:
BUG();
}
}
static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
const union bch_extent_entry *entry,
struct bpos *bucket_pos, struct bch_backpointer *bp)
{
enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
s64 sectors = level ? btree_sectors(c) : k.k->size;
u32 bucket_offset;
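The rewritten bch2_bkey_ptr_data_type() above derives a pointer's data type from
the key type, and for stripe keys from the pointer's position: the trailing
nr_redundant pointers hold parity. A standalone model of that classification
rule (hypothetical toy types, not the kernel's):

#include <assert.h>

enum data_type { DATA_BTREE, DATA_USER, DATA_PARITY };

/* Toy model: in a stripe of nr_blocks pointers, the last nr_redundant
 * hold parity; everything before them is user data. */
static enum data_type stripe_ptr_data_type(unsigned ptr_idx,
		unsigned nr_blocks, unsigned nr_redundant)
{
	assert(ptr_idx < nr_blocks);
	return ptr_idx >= nr_blocks - nr_redundant ? DATA_PARITY : DATA_USER;
}

For example, with nr_blocks = 6 and nr_redundant = 2, pointers 0-3 classify as
user data and pointers 4-5 as parity.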
@@ -209,7 +209,7 @@
#include "fifo.h"
#include "nocow_locking_types.h"
#include "opts.h"
#include "recovery_types.h"
#include "recovery_passes_types.h"
#include "sb-errors_types.h"
#include "seqmutex.h"
#include "time_stats.h"

@@ -810,7 +810,6 @@ struct bch_fs {
/* snapshot.c: */
struct snapshot_table __rcu *snapshots;
size_t snapshot_table_size;
struct mutex snapshot_table_lock;
struct rw_semaphore snapshot_create_lock;

@@ -134,18 +134,24 @@ void bch2_dump_btree_node_iter(struct btree *b,
printbuf_exit(&buf);
}
#ifdef CONFIG_BCACHEFS_DEBUG
void __bch2_verify_btree_nr_keys(struct btree *b)
struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
{
struct bset_tree *t;
struct bkey_packed *k;
struct btree_nr_keys nr = { 0 };
struct btree_nr_keys nr = {};
for_each_bset(b, t)
bset_tree_for_each_key(b, t, k)
if (!bkey_deleted(k))
btree_keys_account_key_add(&nr, t - b->set, k);
return nr;
}
#ifdef CONFIG_BCACHEFS_DEBUG
void __bch2_verify_btree_nr_keys(struct btree *b)
{
struct btree_nr_keys nr = bch2_btree_node_count_keys(b);
BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}

@@ -458,6 +458,8 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
/* Accounting: */
struct btree_nr_keys bch2_btree_node_count_keys(struct btree *);
static inline void btree_keys_account_key(struct btree_nr_keys *n,
unsigned bset,
struct bkey_packed *k,
@@ -808,7 +808,8 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
prt_printf(&buf, "\nmax ");
bch2_bpos_to_text(&buf, b->data->max_key);
bch2_fs_inconsistent(c, "%s", buf.buf);
bch2_fs_topology_error(c, "%s", buf.buf);
printbuf_exit(&buf);
}

@@ -1134,6 +1135,8 @@ void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
b = btree_cache_find(bc, k);
if (!b)
return;
BUG_ON(b == btree_node_root(trans->c, b));
wait_on_io:
/* not allowed to wait on io with btree locks held: */
@@ -7,6 +7,7 @@
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"

@@ -24,7 +25,7 @@
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "super-io.h"

@@ -70,90 +71,6 @@ static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
__gc_pos_set(c, new_pos);
}
/*
* Missing: if an interior btree node is empty, we need to do something -
* perhaps just kill it
*/
static int bch2_gc_check_topology(struct bch_fs *c,
struct btree *b,
struct bkey_buf *prev,
struct bkey_buf cur,
bool is_last)
{
struct bpos node_start = b->data->min_key;
struct bpos node_end = b->data->max_key;
struct bpos expected_start = bkey_deleted(&prev->k->k)
? node_start
: bpos_successor(prev->k->k.p);
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
int ret = 0;
if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
if (!bpos_eq(expected_start, bp->v.min_key)) {
bch2_topology_error(c);
if (bkey_deleted(&prev->k->k)) {
prt_printf(&buf1, "start of node: ");
bch2_bpos_to_text(&buf1, node_start);
} else {
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
}
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
if (__fsck_err(c,
FSCK_CAN_FIX|
FSCK_CAN_IGNORE|
FSCK_NO_RATELIMIT,
btree_node_topology_bad_min_key,
"btree node with incorrect min_key at btree %s level %u:\n"
" prev %s\n"
" cur %s",
bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
}
}
}
if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
bch2_topology_error(c);
printbuf_reset(&buf1);
printbuf_reset(&buf2);
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
bch2_bpos_to_text(&buf2, node_end);
if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
btree_node_topology_bad_max_key,
"btree node with incorrect max_key at btree %s level %u:\n"
" %s\n"
" expected %s",
bch2_btree_id_str(b->c.btree_id), b->c.level,
buf1.buf, buf2.buf) &&
should_restart_for_topology_repair(c)) {
bch_info(c, "Halting mark and sweep to start topology repair pass");
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
goto err;
} else {
set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
}
}
bch2_bkey_buf_copy(prev, c, cur.k);
err:
fsck_err:
printbuf_exit(&buf2);
printbuf_exit(&buf1);
return ret;
}
static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
switch (b->key.k.type) {
@@ -445,6 +362,7 @@ again:
prev = NULL;
if (ret == DROP_PREV_NODE) {
bch_info(c, "dropped prev node");
bch2_btree_node_evict(trans, prev_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, prev_k.k->k.p);

@@ -591,7 +509,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, p, entry_c);
if (fsck_err_on(!g->gen_valid,
c, ptr_to_missing_alloc_key,

@@ -657,7 +575,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
continue;
if (fsck_err_on(bucket_data_type(g->data_type) &&
bucket_data_type(g->data_type) != data_type, c,
bucket_data_type(g->data_type) !=
bucket_data_type(data_type), c,
ptr_bucket_data_type_mismatch,
"bucket %u:%zu different types of data in same bucket: %s, %s\n"
"while marking %s",

@@ -698,18 +617,13 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
}
if (do_update) {
struct bkey_ptrs ptrs;
union bch_extent_entry *entry;
struct bch_extent_ptr *ptr;
struct bkey_i *new;
if (is_root) {
bch_err(c, "cannot update btree roots yet");
ret = -EINVAL;
goto err;
}
new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
struct bkey_i *new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
if (!new) {
ret = -BCH_ERR_ENOMEM_gc_repair_key;
bch_err_msg(c, ret, "allocating new key");

@@ -724,7 +638,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
* btree node isn't there anymore, the read path will
* sort it out:
*/
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_GC_BUCKET(ca, ptr);

@@ -732,19 +646,26 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
ptr->gen = g->gen;
}
} else {
bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_GC_BUCKET(ca, ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
struct bkey_ptrs ptrs;
union bch_extent_entry *entry;
restart_drop_ptrs:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
(ptr->cached &&
(!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) ||
(!ptr->cached &&
gen_cmp(ptr->gen, g->gen) < 0) ||
gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
(g->data_type &&
g->data_type != data_type);
}));
if ((p.ptr.cached &&
(!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
(!p.ptr.cached &&
gen_cmp(p.ptr.gen, g->gen) < 0) ||
gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
(g->data_type &&
g->data_type != data_type)) {
bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
goto restart_drop_ptrs;
}
}
again:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_extent_entry_for_each(ptrs, entry) {
@@ -774,12 +695,6 @@ found:
}
}
ret = bch2_journal_key_insert_take(c, btree_id, level, new);
if (ret) {
kfree(new);
goto err;
}
if (level)
bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new);

@@ -793,6 +708,12 @@ found:
bch_info(c, "new key %s", buf.buf);
}
ret = bch2_journal_key_insert_take(c, btree_id, level, new);
if (ret) {
kfree(new);
goto err;
}
*k = bkey_i_to_s_c(new);
}
err:

@@ -819,10 +740,6 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
BUG_ON(bch2_journal_seq_verify &&
k->k->version.lo > atomic64_read(&c->journal.seq));
ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
if (ret)
goto err;
if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
bkey_version_in_future,
"key version number higher than recorded: %llu > %llu",

@@ -831,8 +748,13 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
atomic64_set(&c->key_version, k->k->version.lo);
}
ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
if (ret)
goto err;
ret = commit_do(trans, NULL, NULL, 0,
bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
bch2_key_trigger(trans, btree_id, level, old,
unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
fsck_err:
err:
bch_err_fn(c, ret);

@@ -841,42 +763,30 @@ err:
static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
{
struct bch_fs *c = trans->c;
struct btree_node_iter iter;
struct bkey unpacked;
struct bkey_s_c k;
struct bkey_buf prev, cur;
int ret = 0;
ret = bch2_btree_node_check_topology(trans, b);
if (ret)
return ret;
if (!btree_node_type_needs_gc(btree_node_type(b)))
return 0;
bch2_btree_node_iter_init_from_start(&iter, b);
bch2_bkey_buf_init(&prev);
bch2_bkey_buf_init(&cur);
bkey_init(&prev.k->k);
while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
&k, initial);
if (ret)
break;
return ret;
bch2_btree_node_iter_advance(&iter, b);
if (b->c.level) {
bch2_bkey_buf_reassemble(&cur, c, k);
ret = bch2_gc_check_topology(c, b, &prev, cur,
bch2_btree_node_iter_end(&iter));
if (ret)
break;
}
}
bch2_bkey_buf_exit(&cur, c);
bch2_bkey_buf_exit(&prev, c);
return ret;
return 0;
}
static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,

@@ -925,14 +835,16 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_buf cur, prev;
struct bkey_buf cur;
struct printbuf buf = PRINTBUF;
int ret = 0;
ret = bch2_btree_node_check_topology(trans, b);
if (ret)
return ret;
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
bch2_bkey_buf_init(&prev);
bch2_bkey_buf_init(&cur);
bkey_init(&prev.k->k);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
BUG_ON(bpos_lt(k.k->p, b->data->min_key));

@@ -943,20 +855,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
if (ret)
goto fsck_err;
if (b->c.level) {
bch2_bkey_buf_reassemble(&cur, c, k);
k = bkey_i_to_s_c(cur.k);
bch2_btree_and_journal_iter_advance(&iter);
ret = bch2_gc_check_topology(c, b,
&prev, cur,
!bch2_btree_and_journal_iter_peek(&iter).k);
if (ret)
goto fsck_err;
} else {
bch2_btree_and_journal_iter_advance(&iter);
}
bch2_btree_and_journal_iter_advance(&iter);
}
if (b->c.level > target_depth) {

@@ -1015,7 +914,6 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
}
fsck_err:
bch2_bkey_buf_exit(&cur, c);
bch2_bkey_buf_exit(&prev, c);
bch2_btree_and_journal_iter_exit(&iter);
printbuf_exit(&buf);
return ret;
@@ -654,6 +654,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
*/
bch2_bset_set_no_aux_tree(b, b->set);
bch2_btree_build_aux_trees(b);
b->nr = bch2_btree_node_count_keys(b);
struct bkey_s_c k;
struct bkey unpacked;

@@ -1657,7 +1658,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
prt_str(&buf, "btree node read error: no device to read from\n at ");
bch2_btree_pos_to_text(&buf, c, b);
bch_err(c, "%s", buf.buf);
bch_err_ratelimited(c, "%s", buf.buf);
if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
@@ -927,8 +927,22 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
if (ret)
goto err;
} else {
bch2_bkey_buf_unpack(&tmp, c, l->b,
bch2_btree_node_iter_peek(&l->iter, l->b));
struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
if (!k) {
struct printbuf buf = PRINTBUF;
prt_str(&buf, "node not found at pos ");
bch2_bpos_to_text(&buf, path->pos);
prt_str(&buf, " within parent node ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
bch2_fs_fatal_error(c, "%s", buf.buf);
printbuf_exit(&buf);
ret = -BCH_ERR_btree_need_topology_repair;
goto err;
}
bch2_bkey_buf_unpack(&tmp, c, l->b, k);
if ((flags & BTREE_ITER_PREFETCH) &&
c->opts.btree_node_prefetch) {

@@ -962,7 +976,6 @@ err:
return ret;
}
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;

@@ -2790,6 +2803,31 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
struct btree_transaction_stats *s = btree_trans_stats(trans);
s->max_mem = max(s->max_mem, new_bytes);
if (trans->used_mempool) {
if (trans->mem_bytes >= new_bytes)
goto out_change_top;
/* No more space from mempool item, need malloc new one */
new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
if (unlikely(!new_mem)) {
bch2_trans_unlock(trans);
new_mem = kmalloc(new_bytes, GFP_KERNEL);
if (!new_mem)
return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
ret = bch2_trans_relock(trans);
if (ret) {
kfree(new_mem);
return ERR_PTR(ret);
}
}
memcpy(new_mem, trans->mem, trans->mem_top);
trans->used_mempool = false;
mempool_free(trans->mem, &c->btree_trans_mem_pool);
goto out_new_mem;
}
new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
if (unlikely(!new_mem)) {
bch2_trans_unlock(trans);

@@ -2798,6 +2836,8 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
new_bytes = BTREE_TRANS_MEM_MAX;
memcpy(new_mem, trans->mem, trans->mem_top);
trans->used_mempool = true;
kfree(trans->mem);
}

@@ -2811,7 +2851,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
if (ret)
return ERR_PTR(ret);
}
out_new_mem:
trans->mem = new_mem;
trans->mem_bytes = new_bytes;

@@ -2819,7 +2859,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
}
out_change_top:
p = trans->mem + trans->mem_top;
trans->mem_top += size;
memset(p, 0, size);

@@ -3093,7 +3133,7 @@ void bch2_trans_put(struct btree_trans *trans)
if (paths_allocated != trans->_paths_allocated)
kvfree_rcu_mightsleep(paths_allocated);
if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
if (trans->used_mempool)
mempool_free(trans->mem, &c->btree_trans_mem_pool);
else
kfree(trans->mem);
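The __bch2_trans_kmalloc() changes above follow a standard locked-allocation
shape: try an atomic allocation first, and only on failure drop locks, allocate
with GFP_KERNEL, and relock (restarting the transaction if the relock fails). A
hedged sketch of just that shape, with hypothetical ctx_unlock()/ctx_relock()
stand-ins for bch2_trans_unlock()/bch2_trans_relock():

#include <linux/slab.h>

int ctx_unlock(void *ctx);	/* hypothetical */
int ctx_relock(void *ctx);	/* hypothetical */

static void *grow_locked(void *ctx, void *old, size_t new_bytes)
{
	/* Fast path: must not sleep while locks are held. */
	void *p = krealloc(old, new_bytes, GFP_NOWAIT | __GFP_NOWARN);

	if (p)
		return p;

	/* Slow path: drop locks so the allocator may block. */
	ctx_unlock(ctx);
	p = krealloc(old, new_bytes, GFP_KERNEL);
	if (ctx_relock(ctx)) {
		kfree(p);	/* relock failed: caller must restart */
		return NULL;
	}
	return p;
}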
@@ -261,6 +261,22 @@ int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
return bch2_journal_key_insert(c, id, level, &whiteout);
}
bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree,
unsigned level, struct bpos pos)
{
struct journal_keys *keys = &trans->c->journal_keys;
size_t idx = bch2_journal_key_search(keys, btree, level, pos);
if (!trans->journal_replay_not_finished)
return false;
return (idx < keys->size &&
keys->data[idx].btree_id == btree &&
keys->data[idx].level == level &&
bpos_eq(keys->data[idx].k->k.p, pos) &&
bkey_deleted(&keys->data[idx].k->k));
}
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
unsigned level, struct bpos pos)
{

@@ -363,7 +379,7 @@ static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
struct bkey_s_c btree_k, journal_k, ret;
struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;
if (iter->prefetch && iter->journal.level)
btree_and_journal_iter_prefetch(iter);

@@ -375,9 +391,10 @@ again:
bpos_lt(btree_k.k->p, iter->pos))
bch2_journal_iter_advance_btree(iter);
while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
bpos_lt(journal_k.k->p, iter->pos))
bch2_journal_iter_advance(&iter->journal);
if (iter->trans->journal_replay_not_finished)
while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
bpos_lt(journal_k.k->p, iter->pos))
bch2_journal_iter_advance(&iter->journal);
ret = journal_k.k &&
(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))

@@ -435,7 +452,9 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
bch2_btree_node_iter_init_from_start(&node_iter, b);
__bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key);
list_add(&iter->journal.list, &trans->c->journal_iters);
if (trans->journal_replay_not_finished &&
!test_bit(BCH_FS_may_go_rw, &trans->c->flags))
list_add(&iter->journal.list, &trans->c->journal_iters);
}
/* sort and dedup all keys in the journal: */

@@ -40,8 +40,8 @@ int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
unsigned, struct bkey_i *);
int bch2_journal_key_delete(struct bch_fs *, enum btree_id,
unsigned, struct bpos);
void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id,
unsigned, struct bpos);
bool bch2_key_deleted_in_journal(struct btree_trans *, enum btree_id, unsigned, struct bpos);
void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id, unsigned, struct bpos);
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *);
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *);
@@ -318,7 +318,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
i->k->k.p.snapshot &&
bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot));
bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
}
static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,

@@ -38,6 +38,9 @@ static noinline int extent_front_merge(struct btree_trans *trans,
struct bkey_i *update;
int ret;
if (unlikely(trans->journal_replay_not_finished))
return 0;
update = bch2_bkey_make_mut_noupdate(trans, k);
ret = PTR_ERR_OR_ZERO(update);
if (ret)

@@ -69,6 +72,9 @@ static noinline int extent_back_merge(struct btree_trans *trans,
struct bch_fs *c = trans->c;
int ret;
if (unlikely(trans->journal_replay_not_finished))
return 0;
ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
if (ret < 0)
@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"

@@ -18,6 +19,7 @@
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

@@ -44,56 +46,103 @@ static btree_path_idx_t get_unlocked_mut_path(struct btree_trans *trans,
return path_idx;
}
/* Debug code: */
/*
* Verify that child nodes correctly span parent node's range:
*/
static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
struct bpos next_node = b->data->min_key;
struct btree_node_iter iter;
struct bch_fs *c = trans->c;
struct bpos node_min = b->key.k.type == KEY_TYPE_btree_ptr_v2
? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
: b->data->min_key;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_s_c_btree_ptr_v2 bp;
struct bkey unpacked;
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
struct printbuf buf = PRINTBUF;
struct bkey_buf prev;
int ret = 0;
BUG_ON(!b->c.level);
BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
!bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
b->data->min_key));
if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
return;
if (!b->c.level)
return 0;
bch2_btree_node_iter_init_from_start(&iter, b);
bch2_bkey_buf_init(&prev);
bkey_init(&prev.k->k);
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
while (1) {
k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked);
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
if (k.k->type != KEY_TYPE_btree_ptr_v2)
break;
bp = bkey_s_c_to_btree_ptr_v2(k);
goto out;
if (!bpos_eq(next_node, bp.v->min_key)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, next_node);
bch2_bpos_to_text(&buf2, bp.v->min_key);
panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf);
struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
struct bpos expected_min = bkey_deleted(&prev.k->k)
? node_min
: bpos_successor(prev.k->k.p);
if (!bpos_eq(expected_min, bp.v->min_key)) {
bch2_topology_error(c);
printbuf_reset(&buf);
prt_str(&buf, "end of prev node doesn't match start of next node\n"),
prt_printf(&buf, " in btree %s level %u node ",
bch2_btree_id_str(b->c.btree_id), b->c.level);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
prt_str(&buf, "\n prev ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
prt_str(&buf, "\n next ");
bch2_bkey_val_to_text(&buf, c, k);
need_fsck_err(c, btree_node_topology_bad_min_key, "%s", buf.buf);
goto topology_repair;
}
bch2_btree_node_iter_advance(&iter, b);
if (bch2_btree_node_iter_end(&iter)) {
if (!bpos_eq(k.k->p, b->key.k.p)) {
bch2_dump_btree_node(c, b);
bch2_bpos_to_text(&buf1, b->key.k.p);
bch2_bpos_to_text(&buf2, k.k->p);
panic("expected end %s got %s\n", buf1.buf, buf2.buf);
}
break;
}
next_node = bpos_successor(k.k->p);
bch2_bkey_buf_reassemble(&prev, c, k);
bch2_btree_and_journal_iter_advance(&iter);
}
#endif
if (bkey_deleted(&prev.k->k)) {
bch2_topology_error(c);
printbuf_reset(&buf);
prt_str(&buf, "empty interior node\n");
prt_printf(&buf, " in btree %s level %u node ",
bch2_btree_id_str(b->c.btree_id), b->c.level);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
need_fsck_err(c, btree_node_topology_empty_interior_node, "%s", buf.buf);
goto topology_repair;
} else if (!bpos_eq(prev.k->k.p, b->key.k.p)) {
bch2_topology_error(c);
printbuf_reset(&buf);
prt_str(&buf, "last child node doesn't end at end of parent node\n");
prt_printf(&buf, " in btree %s level %u node ",
bch2_btree_id_str(b->c.btree_id), b->c.level);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
prt_str(&buf, "\n last key ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
need_fsck_err(c, btree_node_topology_bad_max_key, "%s", buf.buf);
goto topology_repair;
}
out:
fsck_err:
bch2_btree_and_journal_iter_exit(&iter);
bch2_bkey_buf_exit(&prev, c);
printbuf_exit(&buf);
return ret;
topology_repair:
if ((c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
bch2_inconsistent_error(c);
ret = -BCH_ERR_btree_need_topology_repair;
} else {
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
}
goto out;
}
/* Calculate ideal packed bkey format for new btree nodes: */
@@ -1380,9 +1429,16 @@ static void __btree_split_node(struct btree_update *as,
if (bkey_deleted(k))
continue;
uk = bkey_unpack_key(b, k);
if (b->c.level &&
u64s < n1_u64s &&
u64s + k->u64s >= n1_u64s &&
bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p))
n1_u64s += k->u64s;
i = u64s >= n1_u64s;
u64s += k->u64s;
uk = bkey_unpack_key(b, k);
if (!i)
n1_pos = uk.p;
bch2_bkey_format_add_key(&format[i], &uk);

@@ -1441,8 +1497,7 @@ static void __btree_split_node(struct btree_update *as,
bch2_verify_btree_nr_keys(n[i]);
if (b->c.level)
btree_node_interior_verify(as->c, n[i]);
BUG_ON(bch2_btree_node_check_topology(trans, n[i]));
}
}

@@ -1473,7 +1528,7 @@ static void btree_split_insert_keys(struct btree_update *as,
__bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
btree_node_interior_verify(as->c, b);
BUG_ON(bch2_btree_node_check_topology(trans, b));
}
}

@@ -1488,9 +1543,14 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
u64 start_time = local_clock();
int ret = 0;
bch2_verify_btree_nr_keys(b);
BUG_ON(!parent && (b != btree_node_root(c, b)));
BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1));
ret = bch2_btree_node_check_topology(trans, b);
if (ret)
return ret;
bch2_btree_interior_update_will_free_node(as, b);
if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {

@@ -1710,7 +1770,11 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
goto split;
}
btree_node_interior_verify(c, b);
ret = bch2_btree_node_check_topology(trans, b);
if (ret) {
bch2_btree_node_unlock_write(trans, path, b);
return ret;
}
bch2_btree_insert_keys_interior(as, trans, path, b, keys);

@@ -1728,7 +1792,7 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
bch2_btree_node_unlock_write(trans, path, b);
btree_node_interior_verify(c, b);
BUG_ON(bch2_btree_node_check_topology(trans, b));
return 0;
split:
/*

@@ -1818,9 +1882,12 @@ int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path,
{
struct bch_fs *c = trans->c;
struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b;
if (btree_node_fake(b))
return bch2_btree_split_leaf(trans, path, flags);
struct btree_update *as =
bch2_btree_update_start(trans, trans->paths + path,
b->c.level, true, flags);
bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, flags);
if (IS_ERR(as))
return PTR_ERR(as);

@@ -10,6 +10,8 @@
#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
int bch2_btree_node_check_topology(struct btree_trans *, struct btree *);
/*
* Tracks an in progress split/rewrite of a btree node and the update to the
* parent node:
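The new bch2_btree_node_check_topology() above enforces one invariant: a
parent's children must tile its key range exactly - each child's min_key is the
successor of the previous child's max key, and the last child ends at the
parent's max_key. A toy model of the check over integer positions (assumptions:
inclusive ranges, u64 keys; not the kernel's bpos type):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct child_range { uint64_t min_key, max_key; };

static bool children_tile_parent(const struct child_range *c, size_t n,
		uint64_t parent_min, uint64_t parent_max)
{
	uint64_t expect = parent_min;

	for (size_t i = 0; i < n; i++) {
		if (c[i].min_key != expect)
			return false;		/* gap or overlap */
		expect = c[i].max_key + 1;	/* bpos_successor() analogue */
	}
	/* an empty interior node, or a short last child, is also an error */
	return n && c[n - 1].max_key == parent_max;
}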
@@ -11,6 +11,7 @@
#include "journal_reclaim.h"
#include <linux/prefetch.h>
#include <linux/sort.h>
static int bch2_btree_write_buffer_journal_flush(struct journal *,
struct journal_entry_pin *, u64);

@@ -46,6 +47,14 @@ static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_ke
#endif
}
static int wb_key_seq_cmp(const void *_l, const void *_r)
{
const struct btree_write_buffered_key *l = _l;
const struct btree_write_buffered_key *r = _r;
return cmp_int(l->journal_seq, r->journal_seq);
}
/* Compare excluding idx, the low 24 bits: */
static inline bool wb_key_eq(const void *_l, const void *_r)
{

@@ -357,6 +366,11 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
*/
trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
sort(wb->flushing.keys.data,
wb->flushing.keys.nr,
sizeof(wb->flushing.keys.data[0]),
wb_key_seq_cmp, NULL);
darray_for_each(wb->flushing.keys, i) {
if (!i->journal_seq)
continue;
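The sort() added above supports the "Fix journal pins in btree write buffer"
change the merge message highlights: the slowpath now processes keys in
journal-sequence order, so journal pins can be released in order. The idiom,
shown standalone (kernel sort() from <linux/sort.h>; struct fields are
assumptions for illustration):

#include <linux/sort.h>

struct wb_key { u64 journal_seq; /* ... remaining fields elided ... */ };

static int wb_key_seq_cmp(const void *_l, const void *_r)
{
	const struct wb_key *l = _l, *r = _r;

	return l->journal_seq < r->journal_seq ? -1 :
	       l->journal_seq > r->journal_seq ?  1 : 0;
}

static void sort_by_journal_seq(struct wb_key *keys, size_t nr)
{
	/* NULL swap function: sort() falls back to its generic swap */
	sort(keys, nr, sizeof(keys[0]), wb_key_seq_cmp, NULL);
}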
@@ -525,6 +525,7 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
BUG();
ret = -EIO;
goto err;
}

@@ -628,6 +629,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
bch2_data_type_str(ptr_data_type),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
BUG();
ret = -EIO;
goto err;
}

@@ -815,14 +817,14 @@ static int __mark_pointer(struct btree_trans *trans,
static int bch2_trigger_pointer(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
s64 *sectors,
unsigned flags)
const union bch_extent_entry *entry,
s64 *sectors, unsigned flags)
{
bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
struct bpos bucket;
struct bch_backpointer bp;
bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, entry, &bucket, &bp);
*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);
if (flags & BTREE_TRIGGER_TRANSACTIONAL) {

@@ -851,7 +853,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_GC) {
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
percpu_down_read(&c->mark_lock);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);

@@ -979,7 +981,7 @@ static int __trigger_extent(struct btree_trans *trans,
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
s64 disk_sectors;
ret = bch2_trigger_pointer(trans, btree_id, level, k, p, &disk_sectors, flags);
ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
if (ret < 0)
return ret;
@@ -7,7 +7,7 @@
#include "chardev.h"
#include "journal.h"
#include "move.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"

@@ -14,6 +14,7 @@
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"

@@ -509,6 +510,14 @@ int bch2_data_update_init(struct btree_trans *trans,
unsigned ptrs_locked = 0;
int ret = 0;
/*
* fs is corrupt we have a key for a snapshot node that doesn't exist,
* and we have to check for this because we go rw before repairing the
* snapshots table - just skip it, we can move it later.
*/
if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
return -BCH_ERR_data_update_done;
bch2_bkey_buf_init(&m->k);
bch2_bkey_buf_reassemble(&m->k, c, k);
m->btree_id = btree_id;
@@ -252,7 +252,8 @@
x(BCH_ERR_nopromote, nopromote_in_flight) \
x(BCH_ERR_nopromote, nopromote_no_writes) \
x(BCH_ERR_nopromote, nopromote_enomem) \
x(0, need_inode_lock)
x(0, need_inode_lock) \
x(0, invalid_snapshot_node)
enum bch_errcode {
BCH_ERR_START = 2048,

@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "error.h"
#include "recovery.h"
#include "journal.h"
#include "recovery_passes.h"
#include "super.h"
#include "thread_with_file.h"

@@ -16,7 +17,8 @@ bool bch2_inconsistent_error(struct bch_fs *c)
return false;
case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
bch_err(c, "inconsistency detected - emergency read only");
bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
journal_cur_seq(&c->journal));
return true;
case BCH_ON_ERROR_panic:
panic(bch2_fmt(c, "panic after error"));

@@ -32,6 +32,12 @@ bool bch2_inconsistent_error(struct bch_fs *);
int bch2_topology_error(struct bch_fs *);
#define bch2_fs_topology_error(c, ...) \
({ \
bch_err(c, "btree topology error: " __VA_ARGS__); \
bch2_topology_error(c); \
})
#define bch2_fs_inconsistent(c, ...) \
({ \
bch_err(c, __VA_ARGS__); \
@@ -189,13 +189,18 @@ int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
{
struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
int ret = 0;
bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
btree_ptr_v2_val_too_big,
bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
c, err, btree_ptr_v2_val_too_big,
"value too big (%zu > %zu)",
bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
c, err, btree_ptr_v2_min_key_bad,
"min_key > key");
ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
return ret;

@@ -596,30 +596,6 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
return ret;
}
static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
{
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
case KEY_TYPE_btree_ptr_v2:
return BCH_DATA_btree;
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v:
return BCH_DATA_user;
case KEY_TYPE_stripe: {
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
BUG_ON(ptr < s.v->ptrs ||
ptr >= s.v->ptrs + s.v->nr_blocks);
return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
? BCH_DATA_parity
: BCH_DATA_user;
}
default:
BUG();
}
}
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
@@ -536,7 +536,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
if (likely(!dio->iter.count) || dio->op.error)
break;
bio_reset(bio, NULL, REQ_OP_WRITE);
bio_reset(bio, NULL, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
}
out:
return bch2_dio_write_done(dio);

@@ -618,7 +618,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
REQ_OP_WRITE,
REQ_OP_WRITE | REQ_SYNC | REQ_IDLE,
GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);

@@ -1997,6 +1997,7 @@ out:
return dget(sb->s_root);
err_put_super:
__bch2_fs_stop(c);
deactivate_locked_super(sb);
return ERR_PTR(bch2_err_class(ret));
}
@@ -12,7 +12,7 @@
#include "fsck.h"
#include "inode.h"
#include "keylist.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "super.h"
#include "xattr.h"

@@ -158,9 +158,10 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash_info, &iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash_info, &iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
bch2_trans_iter_exit(trans, &iter);
err:
bch_err_fn(c, ret);

@@ -1371,10 +1372,6 @@ static int check_overlapping_extents(struct btree_trans *trans,
goto err;
}
ret = extent_ends_at(c, extent_ends, seen, k);
if (ret)
goto err;
extent_ends->last_pos = k.k->p;
err:
return ret;

@@ -1504,6 +1501,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
i->seen_this_pos = true;
}
if (k.k->type != KEY_TYPE_whiteout) {
ret = extent_ends_at(c, extent_ends, s, k);
if (ret)
goto err;
}
out:
err:
fsck_err:

@@ -2098,17 +2101,21 @@ static int check_root_trans(struct btree_trans *trans)
if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
"root subvol missing")) {
struct bkey_i_subvolume root_subvol;
struct bkey_i_subvolume *root_subvol =
bch2_trans_kmalloc(trans, sizeof(*root_subvol));
ret = PTR_ERR_OR_ZERO(root_subvol);
if (ret)
goto err;
snapshot = U32_MAX;
inum = BCACHEFS_ROOT_INO;
bkey_subvolume_init(&root_subvol.k_i);
root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
root_subvol.v.flags = 0;
root_subvol.v.snapshot = cpu_to_le32(snapshot);
root_subvol.v.inode = cpu_to_le64(inum);
ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol.k_i, 0);
bkey_subvolume_init(&root_subvol->k_i);
root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL;
root_subvol->v.flags = 0;
root_subvol->v.snapshot = cpu_to_le32(snapshot);
root_subvol->v.inode = cpu_to_le64(inum);
ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0);
bch_err_msg(c, ret, "writing root subvol");
if (ret)
goto err;
@@ -552,8 +552,8 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out,
prt_printf(out, "bi_sectors=%llu", inode->bi_sectors);
prt_newline(out);
prt_newline(out);
prt_printf(out, "bi_version=%llu", inode->bi_version);
prt_newline(out);
#define x(_name, _bits) \
prt_printf(out, #_name "=%llu", (u64) inode->_name); \

@@ -264,6 +264,7 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
ret = 0;
err:
bch2_logged_op_finish(trans, op_k);
bch_err_fn(c, ret);
return ret;
}

@@ -476,6 +477,7 @@ case LOGGED_OP_FINSERT_finish:
break;
}
err:
bch_err_fn(c, ret);
bch2_logged_op_finish(trans, op_k);
bch2_trans_iter_exit(trans, &iter);
return ret;

@@ -37,7 +37,6 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type);
struct bkey_buf sk;
u32 restart_count = trans->restart_count;
int ret;
if (!fn)
return 0;

@@ -45,11 +44,11 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
bch2_bkey_buf_init(&sk);
bch2_bkey_buf_reassemble(&sk, c, k);
ret = drop_locks_do(trans, (bch2_fs_lazy_rw(c), 0)) ?:
fn->resume(trans, sk.k) ?: trans_was_restarted(trans, restart_count);
fn->resume(trans, sk.k);
bch2_bkey_buf_exit(&sk, c);
return ret;
return trans_was_restarted(trans, restart_count);
}
int bch2_resume_logged_ops(struct bch_fs *c)
@@ -7,6 +7,7 @@
#include "disk_groups.h"
#include "error.h"
#include "opts.h"
#include "recovery_passes.h"
#include "super-io.h"
#include "util.h"

@@ -205,6 +206,9 @@ const struct bch_option bch2_opt_table[] = {
#define OPT_STR(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = ARRAY_SIZE(_choices), \
.choices = _choices
#define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = U64_MAX, \
.choices = _choices
#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \

@@ -362,7 +362,12 @@ enum fsck_err_opts {
OPT_FS|OPT_MOUNT, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
NULL, "Don't replay the journal") \
NULL, "Exit recovery immediately prior to journal replay")\
x(recovery_pass_last, u8, \
OPT_FS|OPT_MOUNT, \
OPT_STR_NOLIMIT(bch2_recovery_passes), \
BCH2_NO_SB_OPT, 0, \
NULL, "Exit recovery after specified pass") \
x(keep_journal, u8, \
0, \
OPT_BOOL(), \
@@ -1,35 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "lru.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-downgrade.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super-io.h"
#include <linux/sort.h>

@@ -186,7 +181,7 @@ static int journal_sort_seq_cmp(const void *_l, const void *_r)
return cmp_int(l->journal_seq, r->journal_seq);
}
static int bch2_journal_replay(struct bch_fs *c)
int bch2_journal_replay(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
DARRAY(struct journal_key *) keys_sorted = { 0 };

@@ -194,6 +189,7 @@ static int bch2_journal_replay(struct bch_fs *c)
u64 start_seq = c->journal_replay_seq_start;
u64 end_seq = c->journal_replay_seq_start;
struct btree_trans *trans = bch2_trans_get(c);
bool immediate_flush = false;
int ret = 0;
if (keys->nr) {

@@ -215,6 +211,13 @@ static int bch2_journal_replay(struct bch_fs *c)
darray_for_each(*keys, k) {
cond_resched();
/*
* k->allocated means the key wasn't read in from the journal,
* rather it was from early repair code
*/
if (k->allocated)
immediate_flush = true;
/* Skip fastpath if we're low on space in the journal */
ret = c->journal.watermark ? -1 :
commit_do(trans, NULL, NULL,

@@ -266,7 +269,8 @@ static int bch2_journal_replay(struct bch_fs *c)
bch2_trans_put(trans);
trans = NULL;
if (!c->opts.keep_journal)
if (!c->opts.keep_journal &&
c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay)
bch2_journal_keys_put_initial(c);
replay_now_at(j, j->replay_journal_seq_end);

@@ -274,6 +278,12 @@ static int bch2_journal_replay(struct bch_fs *c)
bch2_journal_set_replay_done(j);
/* if we did any repair, flush it immediately */
if (immediate_flush) {
bch2_journal_flush_all_pins(&c->journal);
ret = bch2_journal_meta(&c->journal);
}
if (keys->nr)
bch2_journal_log_msg(c, "journal replay finished");
err:
@ -471,150 +481,6 @@ fsck_err:
	return ret;
}

static int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree root_tree;
	struct bkey_i_snapshot root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset = 1;
	root_tree.v.master_subvol = cpu_to_le32(1);
	root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags = 0;
	root_snapshot.v.parent = 0;
	root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree = cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags = 0;
	root_volume.v.snapshot = cpu_to_le32(U32_MAX);
	root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
	bch_err_fn(c, ret);
	return ret;
}

static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* set bi_subvol on root inode */
noinline_for_stack
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
				__bch2_fs_upgrade_for_subvolumes(trans));
	bch_err_fn(c, ret);
	return ret;
}

const char * const bch2_recovery_passes[] = {
#define x(_fn, ...) #_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, c->opts.norecovery);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except for
	 * setting journal_key->overwritten: it will be accessed by multiple
	 * threads
	 */
	move_gap(keys, keys->nr);

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean)
		return bch2_fs_read_write_early(c);
	return 0;
}

struct recovery_pass_fn {
	int (*fn)(struct bch_fs *);
	unsigned when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

u64 bch2_recovery_passes_to_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

static bool check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_version = bcachefs_metadata_version_current;
@ -687,96 +553,6 @@ static bool check_version_upgrade(struct bch_fs *c)
	return false;
}

u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;

	if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
		return false;
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;
	int ret;

	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
			   bch2_recovery_passes[pass]);
	ret = p->fn(c);
	if (ret)
		return ret;
	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_CONT " done\n");

	return 0;
}

static int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}
		c->curr_recovery_pass++;
		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
	}

	return ret;
}

int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	return ret;
}

int bch2_fs_recovery(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean = NULL;
@ -809,24 +585,14 @@ int bch2_fs_recovery(struct bch_fs *c)
		goto err;
	}

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}
	if (c->opts.norecovery)
		c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;

	if (!c->opts.nochanges) {
		mutex_lock(&c->sb_lock);
		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		bool write_sb = false;

		struct bch_sb_field_ext *ext =
			bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
		if (!ext) {
			ret = -BCH_ERR_ENOSPC_sb;
			mutex_unlock(&c->sb_lock);
			goto err;
		}

		if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
			ext->recovery_passes_required[0] |=
				cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
@ -1017,6 +783,12 @@ use_clean:

	clear_bit(BCH_FS_fsck_running, &c->flags);

	/* fsync if we fixed errors */
	if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_meta(&c->journal);
	}

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_errors_fixed, &c->flags) &&
@ -1051,6 +823,7 @@ use_clean:
	}

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
	bool write_sb = false;

	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
@ -1064,15 +837,12 @@ use_clean:
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags)) {
		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		if (ext &&
		    (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
		     !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
			memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
			memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
			write_sb = true;
		}
	if (!test_bit(BCH_FS_error, &c->flags) &&
	    (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
	     !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
		memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
		memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
		write_sb = true;
	}

	if (c->opts.fsck &&
@ -1155,7 +925,7 @@ int bch2_fs_initialize(struct bch_fs *c)
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
	c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
	set_bit(BCH_FS_may_go_rw, &c->flags);

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
@ -1230,7 +1000,7 @@ int bch2_fs_initialize(struct bch_fs *c)
	if (ret)
		goto err;

	c->recovery_pass_done = ARRAY_SIZE(recovery_pass_fns) - 1;
	c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1;

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);

@ -2,37 +2,7 @@
#ifndef _BCACHEFS_RECOVERY_H
#define _BCACHEFS_RECOVERY_H

extern const char * const bch2_recovery_passes[];

u64 bch2_recovery_passes_to_stable(u64 v);
u64 bch2_recovery_passes_from_stable(u64 v);

/*
 * For when we need to rewind recovery passes and run a pass we skipped:
 */
static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c,
						  enum bch_recovery_pass pass)
{
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return 0;

	bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
		 bch2_recovery_passes[pass], pass,
		 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);

	c->recovery_passes_explicit |= BIT_ULL(pass);

	if (c->curr_recovery_pass >= pass) {
		c->curr_recovery_pass = pass;
		c->recovery_passes_complete &= (1ULL << pass) >> 1;
		return -BCH_ERR_restart_recovery;
	} else {
		return 0;
	}
}

int bch2_run_online_recovery_passes(struct bch_fs *);
u64 bch2_fsck_recovery_passes(void);
int bch2_journal_replay(struct bch_fs *);

int bch2_fs_recovery(struct bch_fs *);
int bch2_fs_initialize(struct bch_fs *);
225 fs/bcachefs/recovery_passes.c Normal file
@ -0,0 +1,225 @@
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_gc.h"
#include "ec.h"
#include "fsck.h"
#include "inode.h"
#include "journal.h"
#include "lru.h"
#include "logged_ops.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"

const char * const bch2_recovery_passes[] = {
#define x(_fn, ...) #_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, false);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	/*
	 * After we go RW, the journal keys buffer can't be modified (except for
	 * setting journal_key->overwritten: it will be accessed by multiple
	 * threads
	 */
	move_gap(keys, keys->nr);

	set_bit(BCH_FS_may_go_rw, &c->flags);

	if (keys->nr || c->opts.fsck || !c->sb.clean)
		return bch2_fs_read_write_early(c);
	return 0;
}

struct recovery_pass_fn {
	int (*fn)(struct bch_fs *);
	unsigned when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

u64 bch2_recovery_passes_to_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

u64 bch2_recovery_passes_from_stable(u64 v)
{
	static const u8 map[] = {
#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	};

	u64 ret = 0;
	for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
		if (v & BIT_ULL(i))
			ret |= BIT_ULL(map[i]);
	return ret;
}

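These two helpers translate between the runtime pass bitmask, whose bit positions follow the order of BCH_RECOVERY_PASSES() and may change between versions, and the stable identifiers in the list's second column, which are what gets written to the superblock. A quick illustration using check_subvol_children, whose stable id is 35 in the table further down (the runtime bit position is list-order dependent, so treat it as an assumption):

	u64 runtime = BIT_ULL(BCH_RECOVERY_PASS_check_subvol_children);
	u64 stable  = bch2_recovery_passes_to_stable(runtime);
	/* stable == BIT_ULL(35), no matter where the pass sits in the enum */
	u64 back    = bch2_recovery_passes_from_stable(stable);
	/* back == runtime: the maps are inverses, which is what lets this
	 * patch move resume_logged_ops within the list while it keeps
	 * stable id 23 on disk */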
/*
 * For when we need to rewind recovery passes and run a pass we skipped:
 */
int bch2_run_explicit_recovery_pass(struct bch_fs *c,
				    enum bch_recovery_pass pass)
{
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return 0;

	bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
		 bch2_recovery_passes[pass], pass,
		 bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);

	c->recovery_passes_explicit |= BIT_ULL(pass);

	if (c->curr_recovery_pass >= pass) {
		c->curr_recovery_pass = pass;
		c->recovery_passes_complete &= (1ULL << pass) >> 1;
		return -BCH_ERR_restart_recovery;
	} else {
		return 0;
	}
}

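The rewind works in both directions: if the requested pass is still ahead, it is merely marked explicit and runs when the loop reaches it; if it is at or behind the current position, curr_recovery_pass is pulled back and -BCH_ERR_restart_recovery propagates out to the pass loop. A hypothetical caller, assuming fsck code notices mid-recovery that topology needs rechecking:

	/* Sketch: request an earlier pass from inside a later one */
	ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
	if (ret)		/* -BCH_ERR_restart_recovery: unwind to the */
		return ret;	/* pass loop, which resumes from check_topology */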
int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
					       enum bch_recovery_pass pass)
{
	__le64 s = cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(pass)));

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	if (!(ext->recovery_passes_required[0] & s)) {
		ext->recovery_passes_required[0] |= s;
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	return bch2_run_explicit_recovery_pass(c, pass);
}

u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;

	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + pass;
	int ret;

	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
			   bch2_recovery_passes[pass]);
	ret = p->fn(c);
	if (ret)
		return ret;
	if (!(p->when & PASS_SILENT))
		bch2_print(c, KERN_CONT " done\n");

	return 0;
}

int bch2_run_online_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
		struct recovery_pass_fn *p = recovery_pass_fns + i;

		if (!(p->when & PASS_ONLINE))
			continue;

		ret = bch2_run_recovery_pass(c, i);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
			i = c->curr_recovery_pass;
			continue;
		}
		if (ret)
			break;
	}

	return ret;
}

int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		if (c->opts.recovery_pass_last &&
		    c->curr_recovery_pass > c->opts.recovery_pass_last)
			break;

		if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
			unsigned pass = c->curr_recovery_pass;

			ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
			if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
			    (ret && c->curr_recovery_pass < pass))
				continue;
			if (ret)
				break;

			c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
		}

		c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);

		c->curr_recovery_pass++;
	}

	return ret;
}
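This is where the improved -o norecovery lands: rather than sprinkling norecovery checks through individual passes, bch2_fs_recovery() (above) translates the option into opts.recovery_pass_last, and the loop simply stops once curr_recovery_pass moves past that limit. A sketch of the effective behavior, assuming the option plumbing shown earlier in this diff:

	/* mount -o norecovery, in effect: */
	c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;
	/* the loop above breaks before journal_replay ever runs, and
	 * bch2_fs_read_write() later refuses to go rw with
	 * -BCH_ERR_erofs_norecovery (see the super.c hunk below) */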
17 fs/bcachefs/recovery_passes.h Normal file
@ -0,0 +1,17 @@
#ifndef _BCACHEFS_RECOVERY_PASSES_H
#define _BCACHEFS_RECOVERY_PASSES_H

extern const char * const bch2_recovery_passes[];

u64 bch2_recovery_passes_to_stable(u64 v);
u64 bch2_recovery_passes_from_stable(u64 v);

u64 bch2_fsck_recovery_passes(void);

int bch2_run_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass);
int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *, enum bch_recovery_pass);

int bch2_run_online_recovery_passes(struct bch_fs *);
int bch2_run_recovery_passes(struct bch_fs *);

#endif /* _BCACHEFS_RECOVERY_PASSES_H */

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_RECOVERY_TYPES_H
#define _BCACHEFS_RECOVERY_TYPES_H
#ifndef _BCACHEFS_RECOVERY_PASSES_TYPES_H
#define _BCACHEFS_RECOVERY_PASSES_TYPES_H

#define PASS_SILENT BIT(0)
#define PASS_FSCK BIT(1)
@ -37,7 +37,6 @@
	x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \
	x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \
	x(fs_upgrade_for_subvolumes, 22, 0) \
	x(resume_logged_ops, 23, PASS_ALWAYS) \
	x(check_inodes, 24, PASS_FSCK) \
	x(check_extents, 25, PASS_FSCK) \
	x(check_indirect_extents, 26, PASS_FSCK) \
@ -47,6 +46,7 @@
	x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \
	x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \
	x(check_nlinks, 31, PASS_FSCK) \
	x(resume_logged_ops, 23, PASS_ALWAYS) \
	x(delete_dead_inodes, 32, PASS_FSCK|PASS_UNCLEAN) \
	x(fix_reflink_p, 33, 0) \
	x(set_fs_needs_rebalance, 34, 0) \
@ -56,6 +56,7 @@ enum bch_recovery_pass {
#define x(n, id, when) BCH_RECOVERY_PASS_##n,
	BCH_RECOVERY_PASSES()
#undef x
	BCH_RECOVERY_PASS_NR
};

/* But we also need stable identifiers that can be used in the superblock */
@ -65,4 +66,4 @@ enum bch_recovery_pass_stable {
#undef x
};

#endif /* _BCACHEFS_RECOVERY_TYPES_H */
#endif /* _BCACHEFS_RECOVERY_PASSES_TYPES_H */
@ -185,8 +185,7 @@ not_found:
	} else {
		bkey_error_init(update);
		update->k.p = p.k->p;
		update->k.p.offset = next_idx;
		update->k.size = next_idx - *idx;
		update->k.size = p.k->size;
		set_bkey_val_u64s(&update->k, 0);
	}

@ -7,7 +7,7 @@

#include "bcachefs.h"
#include "darray.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "sb-downgrade.h"
#include "sb-errors.h"
#include "super-io.h"
@ -265,7 +265,9 @@
	x(subvol_children_bad, 257) \
	x(subvol_loop, 258) \
	x(subvol_unreachable, 259) \
	x(btree_node_bkey_bad_u64s, 260)
	x(btree_node_bkey_bad_u64s, 260) \
	x(btree_node_topology_empty_interior_node, 261) \
	x(btree_ptr_v2_min_key_bad, 262)

enum bch_sb_error_id {
#define x(t, n) BCH_FSCK_ERR_##t = n,
@ -93,8 +93,10 @@ static int bch2_snapshot_tree_create(struct btree_trans *trans,

static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
{
	while (id && id < ancestor)
		id = __snapshot_t(t, id)->parent;
	while (id && id < ancestor) {
		const struct snapshot_t *s = __snapshot_t(t, id);
		id = s ? s->parent : 0;
	}
	return id == ancestor;
}

@ -110,6 +112,8 @@ static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancest
static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
{
	const struct snapshot_t *s = __snapshot_t(t, id);
	if (!s)
		return 0;

	if (s->skip[2] <= ancestor)
		return s->skip[2];
@ -127,7 +131,7 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
	rcu_read_lock();
	struct snapshot_table *t = rcu_dereference(c->snapshots);

	if (unlikely(c->recovery_pass_done <= BCH_RECOVERY_PASS_check_snapshots)) {
	if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
		ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
		goto out;
	}
@ -151,36 +155,39 @@ out:
static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;
	size_t new_size;
	struct snapshot_table *new, *old;

	new_size = max(16UL, roundup_pow_of_two(idx + 1));
	size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
	size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);

	new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
	new = kvzalloc(new_bytes, GFP_KERNEL);
	if (!new)
		return NULL;

	new->nr = new_size;

	old = rcu_dereference_protected(c->snapshots, true);
	if (old)
		memcpy(new->s,
		       rcu_dereference_protected(c->snapshots, true)->s,
		       sizeof(new->s[0]) * c->snapshot_table_size);
		memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);

	rcu_assign_pointer(c->snapshots, new);
	c->snapshot_table_size = new_size;
	kvfree_rcu_mightsleep(old);
	kvfree_rcu(old, rcu);

	return &rcu_dereference_protected(c->snapshots, true)->s[idx];
	return &rcu_dereference_protected(c->snapshots,
				lockdep_is_held(&c->snapshot_table_lock))->s[idx];
}

static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
{
	size_t idx = U32_MAX - id;
	struct snapshot_table *table =
		rcu_dereference_protected(c->snapshots,
					  lockdep_is_held(&c->snapshot_table_lock));

	lockdep_assert_held(&c->snapshot_table_lock);

	if (likely(idx < c->snapshot_table_size))
		return &rcu_dereference_protected(c->snapshots, true)->s[idx];
	if (likely(table && idx < table->nr))
		return &table->s[idx];

	return __snapshot_t_mut(c, id);
}

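The resize path above now takes its capacity from the allocator rather than from a power of two of the index: kmalloc_size_roundup() reports the bucket the allocation will actually land in, every byte of that bucket becomes usable table entries, and the count is recorded in new->nr for the bounds checks that follow. A worked example with made-up numbers:

	/* Illustration only; real sizes depend on struct layout and slab config. */
	size_t bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
	/* e.g. a request that computes to 1500 bytes may round up to 2048 */
	size_t nr = (bytes - sizeof(*new)) / sizeof(new->s[0]);
	/* the ~548 spare bytes become extra snapshot_t slots instead of waste */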
@ -33,7 +33,11 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,

static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
{
	return &t->s[U32_MAX - id];
	u32 idx = U32_MAX - id;

	return likely(t && idx < t->nr)
		? &t->s[idx]
		: NULL;
}

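__snapshot_t() is now the single choke point for bounds checking: ids map to table indices as U32_MAX - id, and anything at or past t->nr yields NULL instead of an out-of-bounds read. Callers all follow one pattern, sketched here as a hypothetical accessor in the same style as the real ones below (like those, it assumes the caller holds rcu_read_lock()):

	/* Hypothetical example accessor; the real ones below look just like it. */
	static inline u32 example_snapshot_depth(struct bch_fs *c, u32 id)
	{
		const struct snapshot_t *s = snapshot_t(c, id);
		return s ? s->depth : 0;	/* 0 doubles as "no such snapshot" */
	}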
static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
@ -44,7 +48,8 @@ static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	id = snapshot_t(c, id)->tree;
	const struct snapshot_t *s = snapshot_t(c, id);
	id = s ? s->tree : 0;
	rcu_read_unlock();

	return id;
@ -52,7 +57,8 @@ static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)

static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
	return snapshot_t(c, id)->parent;
	const struct snapshot_t *s = snapshot_t(c, id);
	return s ? s->parent : 0;
}

static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
@ -66,19 +72,19 @@ static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)

static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	u32 parent = snapshot_t(c, id)->parent;
	const struct snapshot_t *s = snapshot_t(c, id);
	if (!s)
		return 0;

	if (parent &&
	    snapshot_t(c, id)->depth != snapshot_t(c, parent)->depth + 1)
	u32 parent = s->parent;
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    parent &&
	    s->depth != snapshot_t(c, parent)->depth + 1)
		panic("id %u depth=%u parent %u depth=%u\n",
		      id, snapshot_t(c, id)->depth,
		      parent, snapshot_t(c, parent)->depth);

	return parent;
#else
	return snapshot_t(c, id)->parent;
#endif
}

static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
@ -116,7 +122,8 @@ static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)

static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id)
{
	return snapshot_t(c, id)->equiv;
	const struct snapshot_t *s = snapshot_t(c, id);
	return s ? s->equiv : 0;
}

static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
@ -133,38 +140,22 @@ static inline bool bch2_snapshot_is_equiv(struct bch_fs *c, u32 id)
	return id == bch2_snapshot_equiv(c, id);
}

static inline bool bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s;
	bool ret;

	rcu_read_lock();
	s = snapshot_t(c, id);
	ret = s->children[0];
	const struct snapshot_t *s = snapshot_t(c, id);
	int ret = s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
	rcu_read_unlock();

	return ret;
}

static inline u32 bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
{
	return !bch2_snapshot_is_internal_node(c, id);
}

static inline u32 bch2_snapshot_sibling(struct bch_fs *c, u32 id)
{
	const struct snapshot_t *s;
	u32 parent = __bch2_snapshot_parent(c, id);

	if (!parent)
		return 0;

	s = snapshot_t(c, __bch2_snapshot_parent(c, id));
	if (id == s->children[0])
		return s->children[1];
	if (id == s->children[1])
		return s->children[0];
	return 0;
	int ret = bch2_snapshot_is_internal_node(c, id);
	if (ret < 0)
		return ret;
	return !ret;
}

static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
@ -249,7 +240,7 @@ static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
						   struct bpos pos)
{
	if (!btree_type_has_snapshots(id) ||
	    bch2_snapshot_is_leaf(trans->c, pos.snapshot))
	    bch2_snapshot_is_leaf(trans->c, pos.snapshot) > 0)
		return 0;

	return __bch2_key_has_snapshot_overwrites(trans, id, pos);

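bch2_snapshot_is_leaf() and bch2_snapshot_is_internal_node() switch from bool to a tri-state int so that a missing snapshot node surfaces as -BCH_ERR_invalid_snapshot_node instead of being silently treated as a leaf; hence the explicit `> 0` in the hunk above. One way a caller that wants to propagate the error can split the three cases (a sketch, not code from this patch):

	int leaf = bch2_snapshot_is_leaf(c, snapshot_id);
	if (leaf < 0)
		return leaf;	/* invalid node: pass the error up */
	if (leaf)
		return 0;	/* genuine leaf: nothing more to check */
	/* leaf == 0: internal node, do the expensive check */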
@ -595,6 +595,78 @@ err:
	return ret;
}

int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree root_tree;
	struct bkey_i_snapshot root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset = 1;
	root_tree.v.master_subvol = cpu_to_le32(1);
	root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags = 0;
	root_snapshot.v.parent = 0;
	root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree = cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags = 0;
	root_volume.v.snapshot = cpu_to_le32(U32_MAX);
	root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
	bch_err_fn(c, ret);
	return ret;
}

static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* set bi_subvol on root inode */
int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
				__bch2_fs_upgrade_for_subvolumes(trans));
	bch_err_fn(c, ret);
	return ret;
}

int bch2_fs_subvolumes_init(struct bch_fs *c)
{
	INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);

@ -37,6 +37,9 @@ void bch2_delete_dead_snapshots_async(struct bch_fs *);
int bch2_subvolume_unlink(struct btree_trans *, u32);
int bch2_subvolume_create(struct btree_trans *, u64, u32, u32, u32 *, u32 *, bool);

int bch2_initialize_subvolumes(struct bch_fs *);
int bch2_fs_upgrade_for_subvolumes(struct bch_fs *);

int bch2_fs_subvolumes_init(struct bch_fs *);

#endif /* _BCACHEFS_SUBVOLUME_H */

@ -20,6 +20,8 @@ struct snapshot_t {
};

struct snapshot_table {
	struct rcu_head rcu;
	size_t nr;
#ifndef RUST_BINDGEN
	DECLARE_FLEX_ARRAY(struct snapshot_t, s);
#else

@ -8,7 +8,7 @@
#include "journal.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "quota.h"
#include "sb-clean.h"

@ -365,7 +365,7 @@ void bch2_fs_read_only(struct bch_fs *c)
	    !test_bit(BCH_FS_emergency_ro, &c->flags) &&
	    test_bit(BCH_FS_started, &c->flags) &&
	    test_bit(BCH_FS_clean_shutdown, &c->flags) &&
	    !c->opts.norecovery) {
	    c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
		BUG_ON(atomic_read(&c->btree_cache.dirty));
		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
@ -510,7 +510,8 @@ err:

int bch2_fs_read_write(struct bch_fs *c)
{
	if (c->opts.norecovery)
	if (c->opts.recovery_pass_last &&
	    c->opts.recovery_pass_last < BCH_RECOVERY_PASS_journal_replay)
		return -BCH_ERR_erofs_norecovery;

	if (c->opts.nochanges)
@ -1015,8 +1016,16 @@ int bch2_fs_start(struct bch_fs *c)
	for_each_online_member(c, ca)
		bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);

	struct bch_sb_field_ext *ext =
		bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
	mutex_unlock(&c->sb_lock);

	if (!ext) {
		bch_err(c, "insufficient space in superblock for sb_field_ext");
		ret = -BCH_ERR_ENOSPC_sb;
		goto err;
	}

	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

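bch2_fs_start() now guarantees a bch_sb_field_ext exists before recovery runs ("Ensure bch_sb_field_ext always exists" in the merge list), so later code can set recovery_passes_required without re-checking. The minsize argument is in u64 units, the granularity superblock fields are sized in; a hedged reading of the helper's contract, inferred from these call sites rather than stated by the patch:

	/* Assumed contract (sketch): return the existing ext field if it
	 * already holds at least sizeof(*ext) / sizeof(u64) u64s, otherwise
	 * grow or allocate it within the superblock; NULL only when the
	 * superblock is out of space, which the caller turns into
	 * -BCH_ERR_ENOSPC_sb. */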