bcachefs: move.c refactoring

- add bch2_moving_ctxt_(init|exit)
- split out __bch2_evacuate_bucket(), which takes an existing
  moving_ctxt; this will be used to improve copygc performance by
  pipelining across multiple buckets

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2022-06-20 15:40:26 -04:00 committed by Kent Overstreet
parent c91996c50a
commit 0337cc7eee
4 changed files with 114 additions and 87 deletions
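
Editorial illustration, not part of the commit: once a moving_context owns the rate limiter, stats, write point, and copygc-wait flag, one context can be initialized once and reused across several operations. Below is a minimal sketch of the pipelined-copygc pattern the commit message describes; copygc_pipeline_sketch() is hypothetical, and the __bch2_evacuate_bucket() argument list is assumed (this commit only introduces the init/exit pair it would rely on).

static int copygc_pipeline_sketch(struct bch_fs *c,
				  struct bch_move_stats *stats,
				  struct bpos *buckets, unsigned nr)
{
	struct moving_context ctxt;
	unsigned i;
	int ret = 0;

	/* One context for the whole batch, via the new init/exit API: */
	bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
			      writepoint_ptr(&c->copygc_write_point),
			      false);

	/*
	 * Because in-flight reads and writes are tracked on ctxt, reads for
	 * bucket i + 1 can be issued while bucket i's writes still drain:
	 */
	for (i = 0; i < nr && !ret; i++)
		ret = __bch2_evacuate_bucket(&ctxt, buckets[i]); /* hypothetical signature */

	/* Waits for outstanding reads/writes, drops stats from the progress list: */
	bch2_moving_ctxt_exit(&ctxt);
	return ret;
}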

fs/bcachefs/move.c

@@ -20,6 +20,20 @@
 #include <linux/ioprio.h>
 #include <linux/kthread.h>
 
+static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
+{
+	mutex_lock(&c->data_progress_lock);
+	list_add(&stats->list, &c->data_progress_list);
+	mutex_unlock(&c->data_progress_lock);
+}
+
+static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
+{
+	mutex_lock(&c->data_progress_lock);
+	list_del(&stats->list);
+	mutex_unlock(&c->data_progress_lock);
+}
+
 struct moving_io {
 	struct list_head list;
 	struct closure cl;
@@ -120,9 +134,51 @@ static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
 		atomic_read(&ctxt->write_sectors) != sectors_pending);
 }
 
+void bch2_moving_ctxt_exit(struct moving_context *ctxt)
+{
+	move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+	closure_sync(&ctxt->cl);
+	progress_list_del(ctxt->c, ctxt->stats);
+
+	EBUG_ON(atomic_read(&ctxt->write_sectors));
+
+	trace_move_data(ctxt->c,
+			atomic64_read(&ctxt->stats->sectors_moved),
+			atomic64_read(&ctxt->stats->keys_moved));
+}
+
+void bch2_moving_ctxt_init(struct moving_context *ctxt,
+			   struct bch_fs *c,
+			   struct bch_ratelimit *rate,
+			   struct bch_move_stats *stats,
+			   struct write_point_specifier wp,
+			   bool wait_on_copygc)
+{
+	memset(ctxt, 0, sizeof(*ctxt));
+
+	ctxt->c = c;
+	ctxt->rate = rate;
+	ctxt->stats = stats;
+	ctxt->wp = wp;
+	ctxt->wait_on_copygc = wait_on_copygc;
+
+	progress_list_add(c, stats);
+	closure_init_stack(&ctxt->cl);
+	INIT_LIST_HEAD(&ctxt->reads);
+	init_waitqueue_head(&ctxt->wait);
+
+	if (stats)
+		stats->data_type = BCH_DATA_user;
+}
+
+void bch_move_stats_init(struct bch_move_stats *stats, char *name)
+{
+	memset(stats, 0, sizeof(*stats));
+	scnprintf(stats->name, sizeof(stats->name), "%s", name);
+}
+
 static int bch2_move_extent(struct btree_trans *trans,
 			    struct moving_context *ctxt,
-			    struct write_point_specifier wp,
 			    struct bch_io_opts io_opts,
 			    enum btree_id btree_id,
 			    struct bkey_s_c k,
@@ -169,7 +225,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
 	io->rbio.bio.bi_end_io = move_read_endio;
 
-	ret = bch2_data_update_init(c, &io->write, wp, io_opts,
+	ret = bch2_data_update_init(c, &io->write, ctxt->wp, io_opts,
 				    data_opts, btree_id, k);
 	if (ret)
 		goto err_free_pages;
@@ -238,14 +294,12 @@ err:
 }
 
 static int move_ratelimit(struct btree_trans *trans,
-			  struct moving_context *ctxt,
-			  struct bch_ratelimit *rate,
-			  bool wait_on_copygc)
+			  struct moving_context *ctxt)
 {
 	struct bch_fs *c = trans->c;
 	u64 delay;
 
-	if (wait_on_copygc) {
+	if (ctxt->wait_on_copygc) {
 		bch2_trans_unlock(trans);
 		wait_event_killable(c->copygc_running_wq,
 				    !c->copygc_running ||
@@ -253,7 +307,7 @@ static int move_ratelimit(struct btree_trans *trans,
 	}
 
 	do {
-		delay = rate ? bch2_ratelimit_delay(rate) : 0;
+		delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
 
 		if (delay) {
 			bch2_trans_unlock(trans);
@@ -285,17 +339,13 @@ static int move_ratelimit(struct btree_trans *trans,
 	return 0;
 }
 
-static int __bch2_move_data(struct bch_fs *c,
-			    struct moving_context *ctxt,
-			    struct bch_ratelimit *rate,
-			    struct write_point_specifier wp,
+static int __bch2_move_data(struct moving_context *ctxt,
 			    struct bpos start,
 			    struct bpos end,
 			    move_pred_fn pred, void *arg,
-			    struct bch_move_stats *stats,
-			    enum btree_id btree_id,
-			    bool wait_on_copygc)
+			    enum btree_id btree_id)
 {
+	struct bch_fs *c = ctxt->c;
 	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
 	struct bkey_buf sk;
 	struct btree_trans trans;
@@ -308,18 +358,18 @@ static int __bch2_move_data(struct bch_fs *c,
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
 
-	stats->data_type = BCH_DATA_user;
-	stats->btree_id = btree_id;
-	stats->pos = start;
+	ctxt->stats->data_type = BCH_DATA_user;
+	ctxt->stats->btree_id = btree_id;
+	ctxt->stats->pos = start;
 
 	bch2_trans_iter_init(&trans, &iter, btree_id, start,
 			     BTREE_ITER_PREFETCH|
 			     BTREE_ITER_ALL_SNAPSHOTS);
 
-	if (rate)
-		bch2_ratelimit_reset(rate);
+	if (ctxt->rate)
+		bch2_ratelimit_reset(ctxt->rate);
 
-	while (!move_ratelimit(&trans, ctxt, rate, wait_on_copygc)) {
+	while (!move_ratelimit(&trans, ctxt)) {
 		bch2_trans_begin(&trans);
 
 		k = bch2_btree_iter_peek(&iter);
@@ -335,7 +385,7 @@ static int __bch2_move_data(struct bch_fs *c,
 		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
 			break;
 
-		stats->pos = iter.pos;
+		ctxt->stats->pos = iter.pos;
 
 		if (!bkey_extent_is_direct_data(k.k))
 			goto next_nondata;
@@ -369,7 +419,7 @@ static int __bch2_move_data(struct bch_fs *c,
 		bch2_bkey_buf_reassemble(&sk, c, k);
 		k = bkey_i_to_s_c(sk.k);
 
-		ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts,
+		ret2 = bch2_move_extent(&trans, ctxt, io_opts,
 					btree_id, k, data_opts);
 		if (ret2) {
 			if (ret2 == -EINTR)
@@ -385,10 +435,10 @@ static int __bch2_move_data(struct bch_fs *c,
 				goto next;
 		}
 
-		if (rate)
-			bch2_ratelimit_increment(rate, k.k->size);
+		if (ctxt->rate)
+			bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
-		atomic64_add(k.k->size, &stats->sectors_seen);
+		atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 next_nondata:
 		bch2_btree_iter_advance(&iter);
 	}
@@ -400,49 +450,20 @@ next_nondata:
 	return ret;
 }
 
-inline void bch_move_stats_init(struct bch_move_stats *stats, char *name)
-{
-	memset(stats, 0, sizeof(*stats));
-	scnprintf(stats->name, sizeof(stats->name),
-		  "%s", name);
-}
-
-static inline void progress_list_add(struct bch_fs *c,
-				     struct bch_move_stats *stats)
-{
-	mutex_lock(&c->data_progress_lock);
-	list_add(&stats->list, &c->data_progress_list);
-	mutex_unlock(&c->data_progress_lock);
-}
-
-static inline void progress_list_del(struct bch_fs *c,
-				     struct bch_move_stats *stats)
-{
-	mutex_lock(&c->data_progress_lock);
-	list_del(&stats->list);
-	mutex_unlock(&c->data_progress_lock);
-}
-
 int bch2_move_data(struct bch_fs *c,
 		   enum btree_id start_btree_id, struct bpos start_pos,
 		   enum btree_id end_btree_id, struct bpos end_pos,
 		   struct bch_ratelimit *rate,
-		   struct write_point_specifier wp,
-		   move_pred_fn pred, void *arg,
 		   struct bch_move_stats *stats,
-		   bool wait_on_copygc)
+		   struct write_point_specifier wp,
+		   bool wait_on_copygc,
+		   move_pred_fn pred, void *arg)
 {
-	struct moving_context ctxt = { .stats = stats };
+	struct moving_context ctxt;
 	enum btree_id id;
 	int ret;
 
-	progress_list_add(c, stats);
-	closure_init_stack(&ctxt.cl);
-	INIT_LIST_HEAD(&ctxt.reads);
-	init_waitqueue_head(&ctxt.wait);
-
-	stats->data_type = BCH_DATA_user;
+	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
 
 	for (id = start_btree_id;
 	     id <= min_t(unsigned, end_btree_id, BTREE_ID_NR - 1);
@@ -453,25 +474,16 @@ int bch2_move_data(struct bch_fs *c,
 		    id != BTREE_ID_reflink)
 			continue;
 
-		ret = __bch2_move_data(c, &ctxt, rate, wp,
+		ret = __bch2_move_data(&ctxt,
 				       id == start_btree_id ? start_pos : POS_MIN,
 				       id == end_btree_id ? end_pos : POS_MAX,
-				       pred, arg, stats, id, wait_on_copygc);
+				       pred, arg, id);
 		if (ret)
 			break;
 	}
 
-	move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
-	closure_sync(&ctxt.cl);
-
-	EBUG_ON(atomic_read(&ctxt.write_sectors));
-
-	trace_move_data(c,
-			atomic64_read(&stats->sectors_moved),
-			atomic64_read(&stats->keys_moved));
-
-	progress_list_del(c, stats);
+	bch2_moving_ctxt_exit(&ctxt);
 
 	return ret;
 }
@@ -692,8 +704,11 @@ int bch2_data_job(struct bch_fs *c,
 		ret = bch2_move_data(c,
 				     op.start_btree, op.start_pos,
 				     op.end_btree, op.end_pos,
-				     NULL, writepoint_hashed((unsigned long) current),
-				     rereplicate_pred, c, stats, true) ?: ret;
+				     NULL,
+				     stats,
+				     writepoint_hashed((unsigned long) current),
+				     true,
+				     rereplicate_pred, c) ?: ret;
 		ret = bch2_replicas_gc2(c) ?: ret;
 		break;
 	case BCH_DATA_OP_MIGRATE:
@@ -713,8 +728,11 @@ int bch2_data_job(struct bch_fs *c,
 		ret = bch2_move_data(c,
 				     op.start_btree, op.start_pos,
 				     op.end_btree, op.end_pos,
-				     NULL, writepoint_hashed((unsigned long) current),
-				     migrate_pred, &op, stats, true) ?: ret;
+				     NULL,
+				     stats,
+				     writepoint_hashed((unsigned long) current),
+				     true,
+				     migrate_pred, &op) ?: ret;
 		ret = bch2_replicas_gc2(c) ?: ret;
 		break;
 	case BCH_DATA_OP_REWRITE_OLD_NODES:

fs/bcachefs/move.h

@@ -10,11 +10,14 @@
 struct bch_read_bio;
 
 struct moving_context {
-	/* Closure for waiting on all reads and writes to complete */
-	struct closure cl;
+	struct bch_fs *c;
+	struct bch_ratelimit *rate;
+	struct bch_move_stats *stats;
+	struct write_point_specifier wp;
+	bool wait_on_copygc;
 
+	/* For waiting on outstanding reads and writes: */
+	struct closure cl;
 	struct list_head reads;
 
 	/* in flight sectors: */
@@ -25,7 +28,12 @@ struct moving_context {
 };
 
 typedef bool (*move_pred_fn)(struct bch_fs *, void *, struct bkey_s_c,
-			     struct bch_io_opts *, struct data_update_opts *);
+			     struct bch_io_opts *, struct data_update_opts *);
+
+void bch2_moving_ctxt_exit(struct moving_context *);
+void bch2_moving_ctxt_init(struct moving_context *, struct bch_fs *,
+			   struct bch_ratelimit *, struct bch_move_stats *,
+			   struct write_point_specifier, bool);
 
 int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *);
@@ -33,10 +41,10 @@ int bch2_move_data(struct bch_fs *,
 		   enum btree_id, struct bpos,
 		   enum btree_id, struct bpos,
 		   struct bch_ratelimit *,
-		   struct write_point_specifier,
-		   move_pred_fn, void *,
 		   struct bch_move_stats *,
-		   bool);
+		   struct write_point_specifier,
+		   bool,
+		   move_pred_fn, void *);
 
 int bch2_data_job(struct bch_fs *,
 		  struct bch_move_stats *,

fs/bcachefs/movinggc.c

@@ -314,10 +314,10 @@ static int bch2_copygc(struct bch_fs *c)
 			     0, POS_MIN,
 			     BTREE_ID_NR, POS_MAX,
 			     NULL,
-			     writepoint_ptr(&c->copygc_write_point),
-			     copygc_pred, NULL,
 			     &move_stats,
-			     false);
+			     writepoint_ptr(&c->copygc_write_point),
+			     false,
+			     copygc_pred, NULL);
 	if (ret < 0)
 		bch_err(c, "error %i from bch2_move_data() in copygc", ret);
 	if (ret)

fs/bcachefs/rebalance.c

@@ -253,9 +253,10 @@ static int bch2_rebalance_thread(void *arg)
 				       BTREE_ID_NR, POS_MAX,
 				       /* ratelimiting disabled for now */
 				       NULL, /* &r->pd.rate, */
+				       &move_stats,
 				       writepoint_ptr(&c->rebalance_write_point),
-				       rebalance_pred, NULL,
-				       &move_stats, true);
+				       true,
+				       rebalance_pred, NULL);
 	}
 
 	return 0;