// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_iter.h"
#include "eytzinger.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee order of btree updates after a crash, we need to detect when a
 * btree node entry (bset) is newer than the newest journal entry that was
 * successfully written, and ignore it - effectively ignoring any btree updates
 * that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but b was flushed and not a - oops; on recovery we'll find that the updates
 * to b happened, but not the updates to a that happened before it.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code to ask "Is this sequence number newer than the newest journal entry? If
 * so, ignore it."
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that it was blacklisted so that a) on recovery we don't think we have
 * missing journal entries and b) so that the btree code continues to ignore
 * that bset, until that btree node is rewritten.
 */

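/*
 * Illustrative sketch only, not part of this file: with the machinery above,
 * the startup-time check amounts to roughly the following, where journal_seq
 * is the newest sequence number a bset claims to contain updates for, and
 * newest_flushed_journal_seq() is a hypothetical helper standing in for the
 * journal code's answer:
 *
 *	if (journal_seq > newest_flushed_journal_seq(c)) {
 *		ret = bch2_journal_seq_blacklist_add(c, journal_seq,
 *						     journal_seq + 1);
 *		// bch2_journal_seq_is_blacklisted() now returns true for
 *		// journal_seq, so this bset's updates stay ignored until
 *		// the btree node is rewritten
 *	}
 */
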
static unsigned sb_blacklist_u64s(unsigned nr)
{
	struct bch_sb_field_journal_seq_blacklist *bl;

	return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}

static struct bch_sb_field_journal_seq_blacklist *
blacklist_entry_try_merge(struct bch_fs *c,
			  struct bch_sb_field_journal_seq_blacklist *bl,
			  unsigned i)
{
	unsigned nr = blacklist_nr_entries(bl);

	if (le64_to_cpu(bl->start[i].end) >=
	    le64_to_cpu(bl->start[i + 1].start)) {
		bl->start[i].end = bl->start[i + 1].end;
		--nr;
		memmove(&bl->start[i],
			&bl->start[i + 1],
			sizeof(bl->start[0]) * (nr - i));

		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
					  sb_blacklist_u64s(nr));
		BUG_ON(!bl);
	}

	return bl;
}

static bool bl_entry_contig_or_overlaps(struct journal_seq_blacklist_entry *e,
					u64 start, u64 end)
{
	return !(end < le64_to_cpu(e->start) || le64_to_cpu(e->end) < start);
}

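/*
 * Record that journal sequence numbers from start to end are blacklisted in
 * the superblock, merging with any overlapping or contiguous entry, then
 * rebuild the in-memory lookup table.
 */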
int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
	struct bch_sb_field_journal_seq_blacklist *bl;
	unsigned i, nr;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e =
			bl->start + i;

		if (bl_entry_contig_or_overlaps(e, start, end)) {
			e->start = cpu_to_le64(min(start, le64_to_cpu(e->start)));
			e->end	 = cpu_to_le64(max(end, le64_to_cpu(e->end)));

			if (i + 1 < nr)
				bl = blacklist_entry_try_merge(c, bl, i);
			if (i)
				bl = blacklist_entry_try_merge(c, bl, i - 1);
			goto out_write_sb;
		}
	}

	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  sb_blacklist_u64s(nr + 1));
	if (!bl) {
		ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
		goto out;
	}

	bl->start[nr].start	= cpu_to_le64(start);
	bl->start[nr].end	= cpu_to_le64(end);
out_write_sb:
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

	ret = bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret ?: bch2_blacklist_table_initialize(c);
}

static int journal_seq_blacklist_table_cmp(const void *_l,
					   const void *_r, size_t size)
{
	const struct journal_seq_blacklist_table_entry *l = _l;
	const struct journal_seq_blacklist_table_entry *r = _r;

	return cmp_int(l->start, r->start);
}

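/*
 * Look up seq in the eytzinger-ordered in-memory table: find the entry with
 * the greatest start <= seq, then check whether seq falls before that entry's
 * end. When @dirty is set, the matching entry is marked as still referenced
 * so that bch2_blacklist_entries_gc() won't drop it.
 */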
bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
				     bool dirty)
{
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	struct journal_seq_blacklist_table_entry search = { .start = seq };
	int idx;

	if (!t)
		return false;

	idx = eytzinger0_find_le(t->entries, t->nr,
				 sizeof(t->entries[0]),
				 journal_seq_blacklist_table_cmp,
				 &search);
	if (idx < 0)
		return false;

	BUG_ON(t->entries[idx].start > seq);

	if (seq >= t->entries[idx].end)
		return false;

	if (dirty)
		t->entries[idx].dirty = true;
	return true;
}

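/*
 * Rebuild the in-memory lookup table from the superblock's
 * journal_seq_blacklist field, replacing any existing table.
 */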
int bch2_blacklist_table_initialize(struct bch_fs *c)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	struct journal_seq_blacklist_table *t;
	unsigned i, nr = blacklist_nr_entries(bl);

	if (!bl)
		return 0;

	t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr, GFP_KERNEL);
	if (!t)
		return -BCH_ERR_ENOMEM_blacklist_table_init;

	t->nr = nr;

	for (i = 0; i < nr; i++) {
		t->entries[i].start	= le64_to_cpu(bl->start[i].start);
		t->entries[i].end	= le64_to_cpu(bl->start[i].end);
	}

	eytzinger0_sort(t->entries,
			t->nr,
			sizeof(t->entries[0]),
			journal_seq_blacklist_table_cmp,
			NULL);

	kfree(c->journal_seq_blacklist_table);
	c->journal_seq_blacklist_table = t;
	return 0;
}

static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
						  struct bch_sb_field *f,
						  struct printbuf *err)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	unsigned i, nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e = bl->start + i;

		if (le64_to_cpu(e->start) >=
		    le64_to_cpu(e->end)) {
			prt_printf(err, "entry %u start >= end (%llu >= %llu)",
				   i, le64_to_cpu(e->start), le64_to_cpu(e->end));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}

		if (i + 1 < nr &&
		    le64_to_cpu(e[0].end) >
		    le64_to_cpu(e[1].start)) {
			prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
				   i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}
	}

	return 0;
}

static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
						  struct bch_sb *sb,
						  struct bch_sb_field *f)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	struct journal_seq_blacklist_entry *i;
	unsigned nr = blacklist_nr_entries(bl);

	for (i = bl->start; i < bl->start + nr; i++) {
		if (i != bl->start)
			prt_printf(out, " ");

		prt_printf(out, "%llu-%llu",
			   le64_to_cpu(i->start),
			   le64_to_cpu(i->end));
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
	.validate	= bch2_sb_journal_seq_blacklist_validate,
	.to_text	= bch2_sb_journal_seq_blacklist_to_text
};

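/*
 * Garbage collect stale blacklist entries: walk every btree, which causes any
 * still-referenced entries to be marked dirty via
 * bch2_journal_seq_is_blacklisted(), then drop the entries that weren't
 * marked from the superblock field.
 */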
void bch2_blacklist_entries_gc(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					journal_seq_blacklist_gc_work);
	struct journal_seq_blacklist_table *t;
	struct bch_sb_field_journal_seq_blacklist *bl;
	struct journal_seq_blacklist_entry *src, *dst;
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned i, nr, new_nr;
	int ret;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_iter iter;
		struct btree *b;

		bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
					  0, 0, BTREE_ITER_PREFETCH);
retry:
		bch2_trans_begin(trans);

		b = bch2_btree_iter_peek_node(&iter);

		while (!(ret = PTR_ERR_OR_ZERO(b)) &&
		       b &&
		       !test_bit(BCH_FS_STOPPING, &c->flags))
			b = bch2_btree_iter_next_node(&iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;

		bch2_trans_iter_exit(trans, &iter);
	}

	bch2_trans_put(trans);
	if (ret)
		return;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	if (!bl)
		goto out;

	nr = blacklist_nr_entries(bl);
	dst = bl->start;

	t = c->journal_seq_blacklist_table;
	BUG_ON(nr != t->nr);

	for (src = bl->start, i = eytzinger0_first(t->nr);
	     src < bl->start + nr;
	     src++, i = eytzinger0_next(i, nr)) {
		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));

		if (t->entries[i].dirty)
			*dst++ = *src;
	}

	new_nr = dst - bl->start;

	bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);

	if (new_nr != nr) {
		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
					  new_nr ? sb_blacklist_u64s(new_nr) : 0);
		BUG_ON(new_nr && !bl);

		if (!new_nr)
			c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));

		bch2_write_super(c);
	}
out:
	mutex_unlock(&c->sb_lock);
}