bcachefs: member helper cleanups
Some renaming for better consistency:

bch2_member_exists -> bch2_member_alive
bch2_dev_exists -> bch2_member_exists
bch2_dev_exists2 -> bch2_dev_exists
bch_dev_locked -> bch2_dev_locked
bch_dev_bkey_exists -> bch2_dev_bkey_exists

new helper: bch2_dev_safe

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
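At call sites the rename is mechanical; a minimal before/after sketch (the surrounding call site is illustrative, not taken from this patch):

	/* before: inconsistently named helpers */
	if (bch2_dev_exists2(c, ptr->dev)) {		/* index in range and slot populated */
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
		/* ... */
	}

	/* after: everything carries the bch2_ prefix */
	if (bch2_dev_exists(c, ptr->dev)) {
		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
		/* ... */
	}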
parent d155272b6e
commit 2f724563fc
@@ -576,10 +576,10 @@ int bch2_alloc_read(struct bch_fs *c)
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
-			if (!bch2_dev_exists2(c, k.k->p.inode))
+			if (!bch2_dev_exists(c, k.k->p.inode))
				continue;
 
-			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, k.k->p.inode);
 
			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
@@ -597,7 +597,7 @@ int bch2_alloc_read(struct bch_fs *c)
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;
 
-		struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, k.k->p.inode);
 
		struct bch_alloc_v4 a;
		*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
@@ -620,7 +620,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
			       bool set)
 {
	struct bch_fs *c = trans->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
@@ -733,7 +733,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
			"alloc key for invalid device or bucket"))
		return -EIO;
 
-	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, new.k->p.inode);
 
	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
@@ -781,7 +781,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
		}
 
		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
-					bch_dev_bkey_exists(c, new.k->p.inode));
+					bch2_dev_bkey_exists(c, new.k->p.inode));
		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
			ret = bch2_lru_change(trans,
					BCH_LRU_FRAGMENTATION_START,
@@ -955,8 +955,8 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
	if (bch2_dev_bucket_exists(c, *bucket))
		return true;
 
-	if (bch2_dev_exists2(c, bucket->inode)) {
-		ca = bch_dev_bkey_exists(c, bucket->inode);
+	if (bch2_dev_exists(c, bucket->inode)) {
+		ca = bch2_dev_bkey_exists(c, bucket->inode);
 
		if (bucket->offset < ca->mi.first_bucket) {
			bucket->offset = ca->mi.first_bucket;
@@ -997,7 +997,7 @@ again:
	}
 
	if (!bch2_dev_bucket_exists(c, k.k->p)) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, bucket.inode);
 
		bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
	}
@@ -1030,7 +1030,7 @@ int bch2_check_alloc_key(struct btree_trans *trans,
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);
 
-	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+	ca = bch2_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)
		return 0;
 
@@ -1149,7 +1149,7 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
	struct printbuf buf = PRINTBUF;
	int ret;
 
-	ca = bch_dev_bkey_exists(c, start.inode);
+	ca = bch2_dev_bkey_exists(c, start.inode);
	if (!ca->mi.freespace_initialized)
		return 0;
 
@@ -1339,7 +1339,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
	bkey_reassemble(&g.k_i, k);
 
	/* if no bch_dev, skip out whether we repair or not */
-	dev_exists = bch2_dev_exists2(c, k.k->p.inode);
+	dev_exists = bch2_dev_exists(c, k.k->p.inode);
	if (!dev_exists) {
		if (fsck_err_on(!dev_exists, c,
				bucket_gens_to_invalid_dev,
@@ -1350,7 +1350,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
		goto out;
	}
 
-	ca = bch_dev_bkey_exists(c, k.k->p.inode);
+	ca = bch2_dev_bkey_exists(c, k.k->p.inode);
	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets, c,
			bucket_gens_to_invalid_buckets,
@@ -1669,7 +1669,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
	bool discard_locked = false;
	int ret = 0;
 
-	ca = bch_dev_bkey_exists(c, pos.inode);
+	ca = bch2_dev_bkey_exists(c, pos.inode);
 
	if (!percpu_ref_tryget(&ca->io_ref)) {
		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
@@ -1852,7 +1852,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
		if (i->snapshot)
			continue;
 
-		ca = bch_dev_bkey_exists(c, i->inode);
+		ca = bch2_dev_bkey_exists(c, i->inode);
 
		if (!percpu_ref_tryget(&ca->io_ref)) {
			darray_remove_item(&c->discard_buckets_in_flight, i);
@@ -1893,7 +1893,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
 
 static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bucket.inode);
 
	if (!percpu_ref_is_dying(&ca->io_ref) &&
	    !discard_in_flight_add(c, bucket) &&
@@ -17,10 +17,10 @@ static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
 {
	struct bch_dev *ca;
 
-	if (!bch2_dev_exists2(c, pos.inode))
+	if (!bch2_dev_exists(c, pos.inode))
		return false;
 
-	ca = bch_dev_bkey_exists(c, pos.inode);
+	ca = bch2_dev_bkey_exists(c, pos.inode);
	return bucket_valid(ca, pos.offset);
 }
 
@@ -100,7 +100,7 @@ static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *o
 
 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 
	if (ob->ec) {
		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
@@ -679,7 +679,7 @@ static int add_new_bucket(struct bch_fs *c,
			  struct open_bucket *ob)
 {
	unsigned durability =
-		bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+		bch2_dev_bkey_exists(c, ob->dev)->mi.durability;
 
	BUG_ON(*nr_effective >= nr_replicas);
 
@@ -836,7 +836,7 @@ static bool want_bucket(struct bch_fs *c,
			bool *have_cache, bool ec,
			struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 
	if (!test_bit(ob->dev, devs_may_alloc->d))
		return false;
@@ -906,7 +906,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
 
		if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
			struct bch_dev_usage usage;
			u64 avail;
 
@@ -1291,7 +1291,7 @@ deallocate_extra_replicas(struct bch_fs *c,
	unsigned i;
 
	open_bucket_for_each(c, ptrs, ob, i) {
-		unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+		unsigned d = bch2_dev_bkey_exists(c, ob->dev)->mi.durability;
 
		if (d && d <= extra_replicas) {
			extra_replicas -= d;
@@ -1444,7 +1444,7 @@ err:
 
 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 
	return (struct bch_extent_ptr) {
		.type = 1 << BCH_EXTENT_ENTRY_ptr,
@@ -1520,7 +1520,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
 
 static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
	unsigned data_type = ob->data_type;
	barrier(); /* READ_ONCE() doesn't work on bitfields */
 
@@ -184,7 +184,7 @@ bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
	wp->sectors_allocated += sectors;
 
	open_bucket_for_each(c, &wp->ptrs, ob, i) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
 
		ptr.cached = cached ||
@@ -46,10 +46,10 @@ int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
 
	/* these will be caught by fsck */
-	if (!bch2_dev_exists2(c, bp.k->p.inode))
+	if (!bch2_dev_exists(c, bp.k->p.inode))
		return 0;
 
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bp.k->p.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bp.k->p.inode);
	struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
	int ret = 0;
 
@@ -75,7 +75,7 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
 
 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
-	if (bch2_dev_exists2(c, k.k->p.inode)) {
+	if (bch2_dev_exists(c, k.k->p.inode)) {
		prt_str(out, "bucket=");
		bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
		prt_str(out, " ");
@@ -366,7 +366,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
	struct printbuf buf = PRINTBUF;
	int ret = 0;
 
-	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
+	if (fsck_err_on(!bch2_dev_exists(c, k.k->p.inode), c,
			backpointer_to_missing_device,
			"backpointer for missing device:\n%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -459,7 +459,7 @@ found:
 
	bytes = p.crc.compressed_size << 9;
 
-	struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, dev);
	if (!bch2_dev_get_ioref(ca, READ))
		return false;
 
@@ -39,7 +39,7 @@ void bch2_backpointer_swab(struct bkey_s);
 static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
					   struct bpos bp_pos)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bp_pos.inode);
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
 
	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
@@ -61,7 +61,7 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
					   struct bpos bucket,
					   u64 bucket_offset)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bucket.inode);
	struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);
	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
	return ret;
@@ -1253,11 +1253,6 @@ static inline s64 bch2_current_time(const struct bch_fs *c)
	return timespec_to_bch2_time(c, now);
 }
 
-static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
-{
-	return dev < c->sb.nr_devices && c->devs[dev];
-}
-
 static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
 {
	struct stdio_redirect *stdio = c->stdio;
@@ -597,7 +597,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
	 * use check_bucket_ref here
	 */
	bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
		enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, p, entry_c);
 
@@ -730,7 +730,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
		 */
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
		bkey_for_each_ptr(ptrs, ptr) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
			struct bucket *g = PTR_GC_BUCKET(ca, ptr);
 
			ptr->gen = g->gen;
@@ -741,7 +741,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 restart_drop_ptrs:
		ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
		bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
			struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
			enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
 
@@ -1215,7 +1215,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
				struct bkey_s_c k)
 {
	struct bch_fs *c = trans->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, iter->pos.inode);
	struct bucket old_gc, gc, *b;
	struct bkey_i_alloc_v4 *a;
	struct bch_alloc_v4 old_convert, new;
@@ -1351,7 +1351,7 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				   BTREE_ITER_prefetch, k, ({
			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
			struct bch_dev *ca = bch2_dev_bkey_exists(c, k.k->p.inode);
			struct bucket *g = gc_bucket(ca, k.k->p.offset);
 
			struct bch_alloc_v4 a_convert;
@@ -1671,7 +1671,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,
 
	percpu_down_read(&c->mark_lock);
	bkey_for_each_ptr(ptrs, ptr) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 
		if (ptr_stale(ca, ptr) > 16) {
			percpu_up_read(&c->mark_lock);
@@ -1680,7 +1680,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,
	}
 
	bkey_for_each_ptr(ptrs, ptr) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
		u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
 
		if (gen_after(*gen, ptr->gen))
@@ -1701,7 +1701,7 @@ update:
 static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
				       struct bkey_s_c k)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(trans->c, iter->pos.inode);
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
	struct bkey_i_alloc_v4 *a_mut;
@@ -1263,7 +1263,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
	btree_node_reset_sib_u64s(b);
 
	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
-		struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca2 = bch2_dev_bkey_exists(c, ptr->dev);
 
		if (ca2->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
@@ -1293,7 +1293,7 @@ static void btree_node_read_work(struct work_struct *work)
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c = rb->c;
	struct btree *b = rb->b;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);
	struct bio *bio = &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	struct printbuf buf = PRINTBUF;
@@ -1305,7 +1305,7 @@ static void btree_node_read_work(struct work_struct *work)
	while (1) {
		retry = true;
		bch_info(c, "retrying read");
-		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+		ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);
		rb->have_ioref = bch2_dev_get_ioref(ca, READ);
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector = rb->pick.ptr.offset;
@@ -1376,7 +1376,7 @@ static void btree_node_read_endio(struct bio *bio)
	struct bch_fs *c = rb->c;
 
	if (rb->have_ioref) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);
 
		bch2_latency_acct(ca, rb->start_time, READ);
	}
@@ -1573,7 +1573,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
	struct btree_node_read_all *ra = rb->ra;
 
	if (rb->have_ioref) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);
 
		bch2_latency_acct(ca, rb->start_time, READ);
	}
@@ -1615,7 +1615,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);
		rb->c = c;
@@ -1692,7 +1692,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
		return;
	}
 
-	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+	ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
 
	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_buf_bytes(b)),
@@ -1909,7 +1909,7 @@ static void btree_node_write_endio(struct bio *bio)
	struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c = wbio->c;
	struct btree *b = wbio->bio.bi_private;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, wbio->dev);
	unsigned long flags;
 
	if (wbio->have_ioref)
@@ -493,7 +493,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
			  u32 bucket_sectors)
 {
	struct bch_fs *c = trans->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	int ret = 0;
@@ -787,7 +787,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
 
	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;
-		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
		enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
 
		percpu_down_read(&c->mark_lock);
@@ -128,7 +128,7 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
 static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
					 const struct bch_extent_ptr *ptr)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
 }
@@ -137,7 +137,7 @@ static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
 }
@@ -360,7 +360,7 @@ void bch2_data_update_exit(struct data_update *update)
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, ptr), 0);
-		percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+		percpu_ref_put(&bch2_dev_bkey_exists(c, ptr->dev)->ref);
	}
 
	bch2_bkey_buf_exit(&update->k, c);
@@ -540,7 +540,7 @@ int bch2_data_update_init(struct btree_trans *trans,
	m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
 
	bkey_for_each_ptr(ptrs, ptr)
-		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+		percpu_ref_get(&bch2_dev_bkey_exists(c, ptr->dev)->ref);
 
	unsigned durability_have = 0, durability_removing = 0;
 
@@ -652,7 +652,7 @@ err:
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(c, &p.ptr), 0);
-		percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
+		percpu_ref_put(&bch2_dev_bkey_exists(c, p.ptr.dev)->ref);
		i++;
	}
 
@@ -37,7 +37,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
	struct btree_node *n_ondisk = c->verify_ondisk;
	struct btree_node *n_sorted = c->verify_data->data;
	struct bset *sorted, *inmemory = &b->data->keys;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
	struct bio *bio;
	bool failed = false, saw_error = false;
 
@@ -194,7 +194,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
		return;
	}
 
-	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+	ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
	if (!bch2_dev_get_ioref(ca, READ)) {
		prt_printf(out, "error getting device to read from: not online\n");
		return;
@@ -177,7 +177,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
		struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, i);
		struct bch_disk_group_cpu *dst;
 
-		if (!bch2_member_exists(&m))
+		if (!bch2_member_alive(&m))
			continue;
 
		g = BCH_MEMBER_GROUP(&m);
@@ -588,7 +588,7 @@ static void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsi
	case TARGET_DEV: {
		struct bch_member m = bch2_sb_member_get(sb, t.dev);
 
-		if (bch2_dev_exists(sb, t.dev)) {
+		if (bch2_member_exists(sb, t.dev)) {
			prt_printf(out, "Device ");
			pr_uuid(out, m.uuid.b);
			prt_printf(out, " (%u)", t.dev);
@@ -253,7 +253,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
	s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
	const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
	struct bucket old, new, *g;
	struct printbuf buf = PRINTBUF;
	int ret = 0;
@@ -609,7 +609,7 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 
			if (bch2_crc_cmp(want, got)) {
				struct printbuf err = PRINTBUF;
-				struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
+				struct bch_dev *ca = bch2_dev_bkey_exists(c, v->ptrs[i].dev);
 
				prt_str(&err, "stripe ");
				bch2_csum_err_msg(&err, v->csum_type, want, got);
@@ -705,7 +705,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned offset = 0, bytes = buf->size << 9;
	struct bch_extent_ptr *ptr = &v->ptrs[idx];
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
		? BCH_DATA_user
		: BCH_DATA_parity;
@@ -1321,7 +1321,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
				       unsigned block,
				       struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
	int ret;
 
@@ -1527,7 +1527,7 @@ void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
 
	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
 
-	ca = bch_dev_bkey_exists(c, ob->dev);
+	ca = bch2_dev_bkey_exists(c, ob->dev);
	offset = ca->mi.bucket_size - ob->sectors_free;
 
	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
@@ -79,8 +79,8 @@ static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p2)
 {
	if (likely(!p1.idx && !p2.idx)) {
-		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
-		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
+		struct bch_dev *dev1 = bch2_dev_bkey_exists(c, p1.ptr.dev);
+		struct bch_dev *dev2 = bch2_dev_bkey_exists(c, p2.ptr.dev);
 
		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
@@ -123,7 +123,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
		if (p.ptr.unwritten)
			return 0;
 
-		ca = bch_dev_bkey_exists(c, p.ptr.dev);
+		ca = bch2_dev_bkey_exists(c, p.ptr.dev);
 
		/*
		 * If there are any dirty pointers it's an error if we can't
@@ -278,7 +278,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
			return false;
 
		/* Extents may not straddle buckets: */
-		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
+		ca = bch2_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
			return false;
 
@@ -667,14 +667,14 @@ static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent
 
 unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, p->ptr.dev);
 
	return __extent_ptr_durability(ca, p);
 }
 
 unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, p->ptr.dev);
 
	if (ca->mi.state == BCH_MEMBER_STATE_failed)
		return 0;
@@ -864,7 +864,7 @@ bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
-		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+		     !ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;
 
	return false;
@@ -973,17 +973,16 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
 
	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
-		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
+		ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr));
 
	return bkey_deleted(k.k);
 }
 
 void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
 {
-	struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
-		? bch_dev_bkey_exists(c, ptr->dev)
-		: NULL;
-
+	out->atomic++;
+	rcu_read_lock();
+	struct bch_dev *ca = bch2_dev_safe(c, ptr->dev);
	if (!ca) {
		prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
			   (u64) ptr->offset, ptr->gen,
@@ -1001,6 +1000,8 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
		if (bucket_valid(ca, b) && ptr_stale(ca, ptr))
			prt_printf(out, " stale");
	}
+	rcu_read_unlock();
+	--out->atomic;
 }
 
 void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
@@ -1079,7 +1080,7 @@ static int extent_ptr_invalid(struct bch_fs *c,
	struct bch_dev *ca;
	int ret = 0;
 
-	if (!bch2_dev_exists2(c, ptr->dev)) {
+	if (!bch2_dev_exists(c, ptr->dev)) {
		/*
		 * If we're in the write path this key might have already been
		 * overwritten, and we could be seeing a device that doesn't
@@ -1092,7 +1093,7 @@ static int extent_ptr_invalid(struct bch_fs *c,
				 "pointer to invalid device (%u)", ptr->dev);
	}
 
-	ca = bch_dev_bkey_exists(c, ptr->dev);
+	ca = bch2_dev_bkey_exists(c, ptr->dev);
	bkey_for_each_ptr(ptrs, ptr2)
		bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
				 ptr_to_duplicate_device,
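The bch2_extent_ptr_to_text() hunks above are the first user of the new bch2_dev_safe() helper: instead of open-coding the bounds-and-NULL check, the caller takes rcu_read_lock() and handles a NULL return. A sketch of the resulting pattern, with the printing elided:

	out->atomic++;
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_safe(c, ptr->dev);	/* NULL if dev is out of range or absent */
	if (!ca) {
		/* fall back to printing the raw dev:offset:gen */
	} else {
		/* ca stays valid for the duration of the RCU read-side section */
	}
	rcu_read_unlock();
	--out->atomic;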
@@ -541,7 +541,7 @@ static void __bch2_read_endio(struct work_struct *work)
	struct bch_read_bio *rbio =
		container_of(work, struct bch_read_bio, work);
	struct bch_fs *c = rbio->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, rbio->pick.ptr.dev);
	struct bio *src = &rbio->bio;
	struct bio *dst = &bch2_rbio_parent(rbio)->bio;
	struct bvec_iter dst_iter = rbio->bvec_iter;
@@ -675,7 +675,7 @@ static void bch2_read_endio(struct bio *bio)
	struct bch_read_bio *rbio =
		container_of(bio, struct bch_read_bio, bio);
	struct bch_fs *c = rbio->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, rbio->pick.ptr.dev);
	struct workqueue_struct *wq = NULL;
	enum rbio_context context = RBIO_CONTEXT_NULL;
 
@@ -762,7 +762,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
						   struct bch_extent_ptr ptr)
 {
	struct bch_fs *c = trans->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr.dev);
	struct btree_iter iter;
	struct printbuf buf = PRINTBUF;
	int ret;
@@ -831,7 +831,7 @@ retry_pick:
		goto err;
	}
 
-	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+	ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
 
	/*
	 * Stale dirty pointers are treated as IO errors, but @failed isn't
@@ -407,9 +407,9 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
	BUG_ON(c->opts.nochanges);
 
	bkey_for_each_ptr(ptrs, ptr) {
-		BUG_ON(!bch2_dev_exists2(c, ptr->dev));
+		BUG_ON(!bch2_dev_exists(c, ptr->dev));
 
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 
		if (to_entry(ptr + 1) < ptrs.end) {
			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
@@ -650,7 +650,7 @@ static void bch2_write_endio(struct bio *bio)
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
	struct bch_fs *c = wbio->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, wbio->dev);
 
	if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
				    op->pos.inode,
@@ -1272,7 +1272,7 @@ retry:
		bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
		prefetch(l);
 
-		if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
+		if (unlikely(!bch2_dev_get_ioref(bch2_dev_bkey_exists(c, ptr->dev), WRITE)))
			goto err_get_ioref;
 
		/* XXX allocating memory with btree locks held - rare */
@@ -1293,7 +1293,7 @@ retry:
	bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
 
	darray_for_each(buckets, i) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, i->b.inode);
 
		__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
					 bucket_to_u64(i->b),
@@ -1370,7 +1370,7 @@ err:
	return;
 err_get_ioref:
	darray_for_each(buckets, i)
-		percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);
+		percpu_ref_put(&bch2_dev_bkey_exists(c, i->b.inode)->io_ref);
 
	/* Fall back to COW path: */
	goto out;
@@ -21,7 +21,7 @@ void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
 {
	darray_for_each(j->ptrs, i) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, i->dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, i->dev);
		u64 offset;
 
		div64_u64_rem(i->sector, ca->mi.bucket_size, &offset);
@@ -677,7 +677,7 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,
 
	dev = le32_to_cpu(u->dev);
 
-	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
+	if (journal_entry_err_on(!bch2_dev_exists(c, dev),
				 c, version, jset, entry,
				 journal_entry_dev_usage_bad_dev,
				 "bad dev")) {
@@ -1390,7 +1390,7 @@ int bch2_journal_read(struct bch_fs *c,
			continue;
 
		darray_for_each(i->ptrs, ptr) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
 
			if (!ptr->csum_good)
				bch_err_dev_offset(ca, ptr->sector,
@@ -1400,7 +1400,7 @@ int bch2_journal_read(struct bch_fs *c,
		}
 
		ret = jset_validate(c,
-				    bch_dev_bkey_exists(c, i->ptrs.data[0].dev),
+				    bch2_dev_bkey_exists(c, i->ptrs.data[0].dev),
				    &i->j,
				    i->ptrs.data[0].sector,
				    READ);
@@ -1731,7 +1731,7 @@ static CLOSURE_CALLBACK(journal_write_submit)
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);
 
	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
		struct journal_device *ja = &ca->journal;
 
		if (!percpu_ref_tryget(&ca->io_ref)) {
@@ -711,7 +711,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
 
		a = bch2_alloc_to_v4(k, &a_convert);
		dirty_sectors = bch2_bucket_sectors_dirty(*a);
-		bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
+		bucket_size = bch2_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
		fragmentation = a->fragmentation_lru;
 
		ret = bch2_btree_write_buffer_tryflush(trans);
@@ -372,7 +372,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
-		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
 
		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
@@ -84,7 +84,7 @@ int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
	}
 
	for (unsigned i = 0; i < r->nr_devs; i++)
-		if (!bch2_dev_exists(sb, r->devs[i])) {
+		if (!bch2_member_exists(sb, r->devs[i])) {
			prt_printf(err, "invalid device %u in entry ", r->devs[i]);
			goto bad;
		}
@@ -200,7 +200,7 @@ cpu_replicas_add_entry(struct bch_fs *c,
	};
 
	for (i = 0; i < new_entry->nr_devs; i++)
-		BUG_ON(!bch2_dev_exists2(c, new_entry->devs[i]));
+		BUG_ON(!bch2_dev_exists(c, new_entry->devs[i]));
 
	BUG_ON(!new_entry->data_type);
	verify_replicas_entry(new_entry);
@@ -954,7 +954,7 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
			continue;
 
		for (i = 0; i < e->nr_devs; i++) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, e->devs[i]);
 
			nr_online += test_bit(e->devs[i], devs.d);
			nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
@@ -164,7 +164,7 @@ static void member_to_text(struct printbuf *out,
	u64 bucket_size = le16_to_cpu(m.bucket_size);
	u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;
 
-	if (!bch2_member_exists(&m))
+	if (!bch2_member_alive(&m))
		return;
 
	prt_printf(out, "Device:\t%u\n", i);
@@ -390,7 +390,7 @@ void bch2_dev_errors_reset(struct bch_dev *ca)
 bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
 {
	bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr)
-		if (!bch2_dev_btree_bitmap_marked_sectors(bch_dev_bkey_exists(c, ptr->dev),
+		if (!bch2_dev_btree_bitmap_marked_sectors(bch2_dev_bkey_exists(c, ptr->dev),
							  ptr->offset, btree_sectors(c)))
			return false;
	return true;
@@ -158,26 +158,38 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
 #define for_each_readable_member(c, ca)				\
	__for_each_online_member(c, ca,	BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
 
+static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
+{
+	return dev < c->sb.nr_devices && c->devs[dev];
+}
+
 /*
  * If a key exists that references a device, the device won't be going away and
  * we can omit rcu_read_lock():
  */
-static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
+static inline struct bch_dev *bch2_dev_bkey_exists(const struct bch_fs *c, unsigned dev)
 {
-	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+	EBUG_ON(!bch2_dev_exists(c, dev));
 
-	return rcu_dereference_check(c->devs[idx], 1);
+	return rcu_dereference_check(c->devs[dev], 1);
 }
 
-static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
+static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
 {
-	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+	EBUG_ON(!bch2_dev_exists(c, dev));
 
-	return rcu_dereference_protected(c->devs[idx],
+	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
 }
 
+static inline struct bch_dev *bch2_dev_safe(struct bch_fs *c, unsigned dev)
+{
+	return c && dev < c->sb.nr_devices
+		? rcu_dereference(c->devs[dev])
+		: NULL;
+}
+
 /* XXX kill, move to struct bch_fs */
 static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
 {
@@ -192,16 +204,16 @@ static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
 extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
 extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;
 
-static inline bool bch2_member_exists(struct bch_member *m)
+static inline bool bch2_member_alive(struct bch_member *m)
 {
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
 }
 
-static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
+static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
 {
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
-		return bch2_member_exists(&m);
+		return bch2_member_alive(&m);
	}
	return false;
 }
@@ -222,7 +234,7 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
-		.valid = bch2_member_exists(mi),
+		.valid = bch2_member_alive(mi),
		.btree_bitmap_shift = mi->btree_bitmap_shift,
		.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
	};
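The sb-members.h hunks above are the heart of the patch: three device-lookup helpers with distinct safety contracts, plus the shared bch2_dev_exists() predicate. A usage sketch (the call sites are illustrative, not from this patch):

	/* a bkey referencing the device pins it: no RCU section needed */
	struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);

	/* holding sb_lock or state_lock: the dereference is lockdep-checked */
	struct bch_dev *ca2 = bch2_dev_locked(c, dev_idx);

	/* no existence guarantee: may return NULL, call under rcu_read_lock() */
	rcu_read_lock();
	struct bch_dev *ca3 = bch2_dev_safe(c, dev_idx);
	if (ca3) {
		/* ca3 is usable only within this RCU read-side section */
	}
	rcu_read_unlock();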
@@ -1300,7 +1300,7 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
	printbuf_tabstop_push(out, 44);
 
	for (int i = 0; i < sb->nr_devices; i++)
-		nr_devices += bch2_dev_exists(sb, i);
+		nr_devices += bch2_member_exists(sb, i);
 
	prt_printf(out, "External UUID:\t");
	pr_uuid(out, sb->user_uuid.b);
@@ -941,7 +941,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
		goto err;
 
	for (i = 0; i < c->sb.nr_devices; i++)
-		if (bch2_dev_exists(c->disk_sb.sb, i) &&
+		if (bch2_member_exists(c->disk_sb.sb, i) &&
		    bch2_dev_alloc(c, i)) {
			ret = -EEXIST;
			goto err;
@@ -1102,7 +1102,7 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs,
	if (!uuid_equal(&fs->sb->uuid, &sb->sb->uuid))
		return -BCH_ERR_device_not_a_member_of_filesystem;
 
-	if (!bch2_dev_exists(fs->sb, sb->sb->dev_idx))
+	if (!bch2_member_exists(fs->sb, sb->sb->dev_idx))
		return -BCH_ERR_device_has_been_removed;
 
	if (fs->sb->block_size != sb->sb->block_size)
@@ -1412,10 +1412,9 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);
 
-	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
-	       !c->devs[sb->sb->dev_idx]);
+	BUG_ON(!bch2_dev_exists(c, sb->sb->dev_idx));
 
-	ca = bch_dev_locked(c, sb->sb->dev_idx);
+	ca = bch2_dev_locked(c, sb->sb->dev_idx);
 
	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)
@@ -1507,10 +1506,10 @@ static bool bch2_fs_may_start(struct bch_fs *c)
		mutex_lock(&c->sb_lock);
 
		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
-			if (!bch2_dev_exists(c->disk_sb.sb, i))
+			if (!bch2_member_exists(c->disk_sb.sb, i))
				continue;
 
-			ca = bch_dev_locked(c, i);
+			ca = bch2_dev_locked(c, i);
 
			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_rw ||
@@ -1779,7 +1778,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
		goto no_slot;
 
	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
-		if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
+		if (!bch2_member_exists(c->disk_sb.sb, dev_idx))
			goto have_slot;
 no_slot:
	ret = -BCH_ERR_ENOSPC_sb_members;
@@ -1885,7 +1884,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
	if (ret)
		goto err;
 
-	ca = bch_dev_locked(c, dev_idx);
+	ca = bch2_dev_locked(c, dev_idx);
 
	ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
	bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);