bcachefs: Minor journal reclaim improvement

With the btree key cache code, journal reclaim now has a lot more work
to do. It could be the case that after journal reclaim has finished one
iteration there's already more work to do, so put it in a loop to check
for that.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2020-11-02 17:51:38 -05:00 committed by Kent Overstreet
parent 45e4dcba79
commit 2f33ece9b4
2 changed files with 50 additions and 40 deletions

View File

@@ -465,34 +465,12 @@ static bool journal_flush_pins(struct journal *j, u64 seq_to_flush,
return ret;
}
/**
* bch2_journal_reclaim - free up journal buckets
*
* Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets.
*
* High watermarks for triggering background reclaim:
* - FIFO has fewer than 512 entries left
* - fewer than 25% journal buckets free
*
* Background reclaim runs until low watermarks are reached:
* - FIFO has more than 1024 entries left
* - more than 50% journal buckets free
*
* As long as a reclaim can complete in the time it takes to fill up
* 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall.
*/
void bch2_journal_reclaim(struct journal *j)
static u64 journal_seq_to_flush(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
unsigned iter, min_nr = 0;
u64 seq_to_flush = 0;
lockdep_assert_held(&j->reclaim_lock);
bch2_journal_do_discards(j);
unsigned iter;
spin_lock(&j->lock);
@@ -524,6 +502,41 @@ void bch2_journal_reclaim(struct journal *j)
(j->pin.size >> 1));
spin_unlock(&j->lock);
return seq_to_flush;
}
/**
* bch2_journal_reclaim - free up journal buckets
*
* Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets.
*
* High watermarks for triggering background reclaim:
* - FIFO has fewer than 512 entries left
* - fewer than 25% journal buckets free
*
* Background reclaim runs until low watermarks are reached:
* - FIFO has more than 1024 entries left
* - more than 50% journal buckets free
*
* As long as a reclaim can complete in the time it takes to fill up
* 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall.
*/
void bch2_journal_reclaim(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
unsigned min_nr = 0;
u64 seq_to_flush = 0;
lockdep_assert_held(&j->reclaim_lock);
do {
bch2_journal_do_discards(j);
seq_to_flush = journal_seq_to_flush(j);
min_nr = 0;
/*
* If it's been longer than j->reclaim_delay_ms since we last flushed,
* make sure to flush at least one journal pin:
@@ -532,12 +545,9 @@ void bch2_journal_reclaim(struct journal *j)
msecs_to_jiffies(j->reclaim_delay_ms)))
min_nr = 1;
if (j->prereserved.reserved * 2 > j->prereserved.remaining) {
seq_to_flush = max(seq_to_flush, journal_last_seq(j));
if (j->prereserved.reserved * 2 > j->prereserved.remaining)
min_nr = 1;
}
journal_flush_pins(j, seq_to_flush, min_nr);
} while (journal_flush_pins(j, seq_to_flush, min_nr));
if (!bch2_journal_error(j))
queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work,

View File

@@ -747,10 +747,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
sizeof(struct sort_iter_set);
if (!(c->wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcache_copygc",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
!(c->journal_reclaim_wq = alloc_workqueue("bcache_journal",
WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
!(c->journal_reclaim_wq = alloc_workqueue("bcachefs_journal_reclaim",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
percpu_ref_init(&c->writes, bch2_writes_disabled,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||