2017-03-17 14:18:50 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
#include "bcachefs.h"
|
2022-02-10 17:32:19 +08:00
|
|
|
#include "alloc_background.h"
|
2018-10-06 12:46:55 +08:00
|
|
|
#include "alloc_foreground.h"
|
2020-01-08 02:29:32 +08:00
|
|
|
#include "btree_io.h"
|
2020-05-26 02:57:06 +08:00
|
|
|
#include "btree_update_interior.h"
|
2023-11-03 06:57:19 +08:00
|
|
|
#include "btree_write_buffer.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "buckets.h"
|
|
|
|
#include "checksum.h"
|
2021-01-30 04:37:28 +08:00
|
|
|
#include "disk_groups.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "error.h"
|
|
|
|
#include "journal.h"
|
|
|
|
#include "journal_io.h"
|
|
|
|
#include "journal_reclaim.h"
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
#include "journal_seq_blacklist.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "replicas.h"
|
2023-08-06 03:54:38 +08:00
|
|
|
#include "sb-clean.h"
|
2017-03-17 14:18:50 +08:00
|
|
|
#include "trace.h"
|
|
|
|
|
2024-04-26 12:32:56 +08:00
|
|
|
void bch2_journal_pos_from_member_info_set(struct bch_fs *c)
|
|
|
|
{
|
|
|
|
lockdep_assert_held(&c->sb_lock);
|
|
|
|
|
|
|
|
for_each_member_device(c, ca) {
|
|
|
|
struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
|
|
|
|
|
|
|
|
m->last_journal_bucket = cpu_to_le32(ca->journal.cur_idx);
|
|
|
|
m->last_journal_bucket_offset = cpu_to_le32(ca->mi.bucket_size - ca->journal.sectors_free);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void bch2_journal_pos_from_member_info_resume(struct bch_fs *c)
|
|
|
|
{
|
|
|
|
mutex_lock(&c->sb_lock);
|
|
|
|
for_each_member_device(c, ca) {
|
|
|
|
struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
|
|
|
|
|
|
|
|
unsigned idx = le32_to_cpu(m.last_journal_bucket);
|
|
|
|
if (idx < ca->journal.nr)
|
|
|
|
ca->journal.cur_idx = idx;
|
|
|
|
unsigned offset = le32_to_cpu(m.last_journal_bucket_offset);
|
|
|
|
if (offset <= ca->mi.bucket_size)
|
|
|
|
ca->journal.sectors_free = ca->mi.bucket_size - offset;
|
|
|
|
}
|
|
|
|
mutex_unlock(&c->sb_lock);
|
|
|
|
}
|
|
|
|
|
2024-01-27 23:01:23 +08:00
|
|
|
void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct journal_replay *j)
|
|
|
|
{
|
|
|
|
darray_for_each(j->ptrs, i) {
|
|
|
|
if (i != j->ptrs.data)
|
|
|
|
prt_printf(out, " ");
|
|
|
|
prt_printf(out, "%u:%u:%u (sector %llu)",
|
|
|
|
i->dev, i->bucket, i->bucket_offset, i->sector);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Print a one-line summary of a journal entry read for replay: its sequence
 * number, every device location it was found at, and (if present) the
 * datetime entry recorded when it was written.
 */
static void bch2_journal_replay_to_text(struct printbuf *out, struct bch_fs *c,
					struct journal_replay *j)
{
	prt_printf(out, "seq %llu ", le64_to_cpu(j->j.seq));

	bch2_journal_ptrs_to_text(out, c, j);

	/* Only the first datetime entry is printed (break after one): */
	for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) {
		struct jset_entry_datetime *datetime =
			container_of(entry, struct jset_entry_datetime, entry);
		bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
		break;
	}
}
|
|
2022-10-14 13:14:15 +08:00
|
|
|
static struct nonce journal_nonce(const struct jset *jset)
|
|
|
|
{
|
|
|
|
return (struct nonce) {{
|
|
|
|
[0] = 0,
|
|
|
|
[1] = ((__le32 *) &jset->seq)[0],
|
|
|
|
[2] = ((__le32 *) &jset->seq)[1],
|
|
|
|
[3] = BCH_NONCE_JOURNAL,
|
|
|
|
}};
|
|
|
|
}
|
|
|
|
|
2024-01-06 00:59:03 +08:00
|
|
|
static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum)
|
2022-10-14 13:14:15 +08:00
|
|
|
{
|
2024-01-06 00:59:03 +08:00
|
|
|
if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) {
|
|
|
|
*csum = (struct bch_csum) {};
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
*csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
|
|
|
|
return !bch2_crc_cmp(j->csum, *csum);
|
2022-10-14 13:14:15 +08:00
|
|
|
}
|
|
|
|
|
2022-04-11 04:26:34 +08:00
|
|
|
/*
 * Map a 64-bit journal sequence number to a radix-tree index: offset from
 * the base seq, truncated to 31 bits (genradix indices are ulongs, so seqs
 * are assumed to fall within +-2 billion of the base — see journal_entry_add()).
 */
static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
	u64 offset = seq - c->journal_entries_base_seq;

	return offset & (~0U >> 1);
}
|
|
|
|
|
/*
 * Actually free a journal_replay entry: clear its slot in the
 * c->journal_entries radix tree, then free the allocation.
 */
static void __journal_replay_free(struct bch_fs *c,
				  struct journal_replay *i)
{
	/* Find the radix-tree slot holding this entry, keyed by its seq: */
	struct journal_replay **p =
		genradix_ptr(&c->journal_entries,
			     journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

	/* The slot must point back at the entry we're freeing: */
	BUG_ON(*p != i);
	*p = NULL;
	kvfree(i);
}
|
|
2024-02-26 07:48:21 +08:00
|
|
|
/*
 * Mark a journal entry as not-to-be-replayed, recording why (blacklisted
 * vs. simply not dirty), and free it — unless read_entire_journal is set,
 * in which case the entry is kept in memory with only the ignore flag set.
 */
static void journal_replay_free(struct bch_fs *c, struct journal_replay *i, bool blacklisted)
{
	if (blacklisted)
		i->ignore_blacklisted = true;
	else
		i->ignore_not_dirty = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(c, i);
}
|
|
2017-03-17 14:18:50 +08:00
|
|
|
/*
 * Shared state for reading the journal from all devices concurrently:
 * bch2_journal_read() kicks off per-device reads and collects results here.
 */
struct journal_list {
	struct closure		cl;		/* completion of outstanding reads */
	u64			last_seq;	/* newest last_seq seen so far; entries older than this are dropped */
	struct mutex		lock;		/* protects the fields below and c->journal_entries insertion */
	int			ret;		/* first error encountered, if any */
};
|
|
|
|
|
#define JOURNAL_ENTRY_ADD_OK 0
|
|
|
|
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE 5
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a journal entry we just read, add it to the list of journal entries to
|
|
|
|
* be replayed:
|
|
|
|
*/
|
|
|
|
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
|
2022-02-19 14:18:18 +08:00
|
|
|
struct journal_ptr entry_ptr,
|
2022-10-14 13:14:15 +08:00
|
|
|
struct journal_list *jlist, struct jset *j)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2022-03-21 12:15:53 +08:00
|
|
|
struct genradix_iter iter;
|
|
|
|
struct journal_replay **_i, *i, *dup;
|
2017-03-17 14:18:50 +08:00
|
|
|
size_t bytes = vstruct_bytes(j);
|
2022-04-11 04:26:34 +08:00
|
|
|
u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
|
2024-01-27 23:01:23 +08:00
|
|
|
struct printbuf buf = PRINTBUF;
|
2021-01-27 05:04:12 +08:00
|
|
|
int ret = JOURNAL_ENTRY_ADD_OK;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-04-21 10:19:48 +08:00
|
|
|
if (!c->journal.oldest_seq_found_ondisk ||
|
|
|
|
le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk)
|
|
|
|
c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq);
|
|
|
|
|
2022-04-11 04:26:34 +08:00
|
|
|
/* Is this entry older than the range we need? */
|
|
|
|
if (!c->opts.read_entire_journal &&
|
|
|
|
le64_to_cpu(j->seq) < jlist->last_seq)
|
|
|
|
return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
|
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
/*
|
2022-04-11 04:26:34 +08:00
|
|
|
* genradixes are indexed by a ulong, not a u64, so we can't index them
|
|
|
|
* by sequence number directly: Assume instead that they will all fall
|
|
|
|
* within the range of +-2billion of the filrst one we find.
|
2022-03-21 12:15:53 +08:00
|
|
|
*/
|
|
|
|
if (!c->journal_entries_base_seq)
|
|
|
|
c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
|
|
|
|
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
/* Drop entries we don't need anymore */
|
2022-04-11 04:26:34 +08:00
|
|
|
if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
|
|
|
|
genradix_for_each_from(&c->journal_entries, iter, _i,
|
|
|
|
journal_entry_radix_idx(c, jlist->last_seq)) {
|
2022-03-21 12:15:53 +08:00
|
|
|
i = *_i;
|
|
|
|
|
2024-02-26 07:48:21 +08:00
|
|
|
if (journal_replay_ignore(i))
|
2022-03-21 12:15:53 +08:00
|
|
|
continue;
|
|
|
|
|
2022-04-11 04:26:34 +08:00
|
|
|
if (le64_to_cpu(i->j.seq) >= last_seq)
|
2020-06-14 06:43:14 +08:00
|
|
|
break;
|
2024-02-26 07:48:21 +08:00
|
|
|
|
|
|
|
journal_replay_free(c, i, false);
|
2020-06-14 06:43:14 +08:00
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2022-04-11 04:26:34 +08:00
|
|
|
jlist->last_seq = max(jlist->last_seq, last_seq);
|
|
|
|
|
|
|
|
_i = genradix_ptr_alloc(&c->journal_entries,
|
|
|
|
journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!_i)
|
2023-03-15 03:35:57 +08:00
|
|
|
return -BCH_ERR_ENOMEM_journal_entry_add;
|
2021-01-27 05:04:12 +08:00
|
|
|
|
2020-08-25 03:58:26 +08:00
|
|
|
/*
|
|
|
|
* Duplicate journal entries? If so we want the one that didn't have a
|
|
|
|
* checksum error:
|
|
|
|
*/
|
2022-04-11 04:26:34 +08:00
|
|
|
dup = *_i;
|
2021-01-27 05:04:12 +08:00
|
|
|
if (dup) {
|
2024-01-27 23:01:23 +08:00
|
|
|
bool identical = bytes == vstruct_bytes(&dup->j) &&
|
|
|
|
!memcmp(j, &dup->j, bytes);
|
|
|
|
bool not_identical = !identical &&
|
|
|
|
entry_ptr.csum_good &&
|
|
|
|
dup->csum_good;
|
2022-10-14 13:14:15 +08:00
|
|
|
|
2024-01-27 23:01:23 +08:00
|
|
|
bool same_device = false;
|
|
|
|
darray_for_each(dup->ptrs, ptr)
|
|
|
|
if (ptr->dev == ca->dev_idx)
|
|
|
|
same_device = true;
|
|
|
|
|
|
|
|
ret = darray_push(&dup->ptrs, entry_ptr);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-01-27 23:01:23 +08:00
|
|
|
bch2_journal_replay_to_text(&buf, c, dup);
|
|
|
|
|
|
|
|
fsck_err_on(same_device,
|
|
|
|
c, journal_entry_dup_same_device,
|
|
|
|
"duplicate journal entry on same device\n %s",
|
|
|
|
buf.buf);
|
|
|
|
|
|
|
|
fsck_err_on(not_identical,
|
|
|
|
c, journal_entry_replicas_data_mismatch,
|
|
|
|
"found duplicate but non identical journal entries\n %s",
|
|
|
|
buf.buf);
|
|
|
|
|
|
|
|
if (entry_ptr.csum_good && !identical)
|
2022-10-14 13:14:15 +08:00
|
|
|
goto replace;
|
|
|
|
|
2024-01-27 23:01:23 +08:00
|
|
|
goto out;
|
2022-10-14 13:14:15 +08:00
|
|
|
}
|
|
|
|
replace:
|
2024-02-01 19:35:46 +08:00
|
|
|
i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
|
2022-04-11 04:26:34 +08:00
|
|
|
if (!i)
|
2023-03-15 03:35:57 +08:00
|
|
|
return -BCH_ERR_ENOMEM_journal_entry_add;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-01-27 13:05:03 +08:00
|
|
|
darray_init(&i->ptrs);
|
2024-02-26 07:48:21 +08:00
|
|
|
i->csum_good = entry_ptr.csum_good;
|
|
|
|
i->ignore_blacklisted = false;
|
|
|
|
i->ignore_not_dirty = false;
|
2017-03-17 14:18:50 +08:00
|
|
|
unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
|
2021-01-27 05:04:12 +08:00
|
|
|
|
|
|
|
if (dup) {
|
2022-10-14 13:14:15 +08:00
|
|
|
/* The first ptr should represent the jset we kept: */
|
2024-01-27 13:05:03 +08:00
|
|
|
darray_for_each(dup->ptrs, ptr)
|
|
|
|
darray_push(&i->ptrs, *ptr);
|
2022-03-21 12:15:53 +08:00
|
|
|
__journal_replay_free(c, dup);
|
2024-01-27 23:01:23 +08:00
|
|
|
} else {
|
|
|
|
darray_push(&i->ptrs, entry_ptr);
|
2021-01-27 05:04:12 +08:00
|
|
|
}
|
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
*_i = i;
|
2017-03-17 14:18:50 +08:00
|
|
|
out:
|
|
|
|
fsck_err:
|
2024-01-27 23:01:23 +08:00
|
|
|
printbuf_exit(&buf);
|
2017-03-17 14:18:50 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* this fills in a range with empty jset_entries: */
|
|
|
|
static void journal_entry_null_range(void *start, void *end)
|
|
|
|
{
|
|
|
|
struct jset_entry *entry;
|
|
|
|
|
|
|
|
for (entry = start; entry != end; entry = vstruct_next(entry))
|
|
|
|
memset(entry, 0, sizeof(*entry));
|
|
|
|
}
|
|
|
|
|
|
|
|
#define JOURNAL_ENTRY_REREAD 5
|
|
|
|
#define JOURNAL_ENTRY_NONE 6
|
|
|
|
#define JOURNAL_ENTRY_BAD 7
|
|
|
|
|
2022-09-27 04:23:19 +08:00
|
|
|
/*
 * Format the common prefix for journal-entry validation errors:
 * version, entry type (if known), and where the entry came from —
 * either the superblock clean section (jset == NULL) or a jset,
 * identified by seq and the entry's u64 offset within it.
 */
static void journal_entry_err_msg(struct printbuf *out,
				  u32 version,
				  struct jset *jset,
				  struct jset_entry *entry)
{
	prt_str(out, "invalid journal entry, version=");
	bch2_version_to_text(out, version);

	if (entry) {
		prt_str(out, " type=");
		bch2_prt_jset_entry_type(out, entry->type);
	}

	if (!jset) {
		prt_printf(out, " in superblock");
	} else {

		prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));

		if (entry)
			prt_printf(out, " offset=%zi/%u",
				   (u64 *) entry - jset->_data,
				   le32_to_cpu(jset->u64s));
	}

	prt_str(out, ": ");
}
|
|
2023-10-25 08:44:36 +08:00
|
|
|
/*
 * Report a journal-entry validation error and decide what to do with it.
 *
 * NOTE: this statement-expression macro implicitly uses 'flags' and 'ret'
 * from the calling function's scope, and may 'goto fsck_err' — callers must
 * provide all three.  On the read path the error is a fixable fsck error;
 * on the write path it means we generated corrupt metadata ourselves, so we
 * log, bump the superblock error count, and go read-only via
 * bch2_fs_inconsistent().  Evaluates to true so it composes with
 * journal_entry_err_on().
 */
#define journal_entry_err(c, version, jset, entry, _err, msg, ...)	\
({									\
	struct printbuf _buf = PRINTBUF;				\
									\
	journal_entry_err_msg(&_buf, version, jset, entry);		\
	prt_printf(&_buf, msg, ##__VA_ARGS__);				\
									\
	switch (flags & BCH_VALIDATE_write) {				\
	case READ:							\
		mustfix_fsck_err(c, _err, "%s", _buf.buf);		\
		break;							\
	case WRITE:							\
		bch2_sb_error_count(c, BCH_FSCK_ERR_##_err);		\
		bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
		if (bch2_fs_inconsistent(c)) {				\
			ret = -BCH_ERR_fsck_errors_not_fixed;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
									\
	printbuf_exit(&_buf);						\
	true;								\
})
|
|
2023-10-25 08:44:36 +08:00
|
|
|
/* Report an error (and possibly goto fsck_err) only when cond holds: */
#define journal_entry_err_on(cond, ...)					\
	((cond) ? journal_entry_err(__VA_ARGS__) : false)

/* Returned by journal_validate_key() when it dropped the key in place: */
#define FSCK_DELETED_KEY	5
|
|
2022-09-27 04:23:19 +08:00
|
|
|
/*
 * Validate a single bkey within a journal entry.
 *
 * Bad keys are deleted in place: the entry is shrunk past the key (or the
 * key is memmove()d out) and the freed tail is nulled with
 * journal_entry_null_range(), then FSCK_DELETED_KEY is returned so the
 * caller knows not to advance.  Returns 0 when the key is fine, or a
 * negative error if fsck decided the fs is inconsistent (via the
 * journal_entry_err_on() goto fsck_err path).
 *
 * On the read path, bch2_bkey_compat() is run before validation (keys on
 * disk may be in an old format); on the write path it runs after, once the
 * key is known valid.
 */
static int journal_validate_key(struct bch_fs *c,
				struct jset *jset,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k,
				unsigned version, int big_endian,
				enum bch_validate_flags flags)
{
	int write = flags & BCH_VALIDATE_write;
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	/* A zero-u64s key can't be stepped over — truncate the entry here: */
	if (journal_entry_err_on(!k->k.u64s,
				 c, version, jset, entry,
				 journal_entry_bkey_u64s_0,
				 "k->u64s 0")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	/* Key claims to extend past the end of the entry: */
	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry),
				 c, version, jset, entry,
				 journal_entry_bkey_past_end,
				 "extends past end of journal entry")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	/* Unknown key format — drop just this key, keep the rest: */
	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
				 c, version, jset, entry,
				 journal_entry_bkey_bad_format,
				 "bad format %u", k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		/* Rebuild the message: context line, then key, then the error: */
		printbuf_reset(&buf);
		journal_entry_err_msg(&buf, version, jset, entry);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		prt_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, journal_entry_bkey_invalid,
				 "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static int journal_entry_btree_keys_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2020-11-17 01:22:30 +08:00
|
|
|
struct bkey_i *k = entry->start;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2020-11-17 01:22:30 +08:00
|
|
|
while (k != vstruct_last(entry)) {
|
2022-09-27 04:23:19 +08:00
|
|
|
int ret = journal_validate_key(c, jset, entry,
|
2020-01-08 02:29:32 +08:00
|
|
|
entry->level,
|
|
|
|
entry->btree_id,
|
2023-07-07 09:16:10 +08:00
|
|
|
k, version, big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
flags|BCH_VALIDATE_journal);
|
2020-11-17 01:22:30 +08:00
|
|
|
if (ret == FSCK_DELETED_KEY)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
k = bkey_next(k);
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
/*
 * Print each key in a btree-keys entry: btree id, level, then the key and
 * value.  Keys after the first are prefixed with a newline and the entry
 * type so multi-key entries stay readable.
 */
static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	bool first = true;

	jset_entry_for_each_key(entry, k) {
		if (!first) {
			prt_newline(out);
			bch2_prt_jset_entry_type(out, entry->type);
			prt_str(out, ": ");
		}
		prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}
|
|
|
|
|
/*
 * Validate a btree-root journal entry, which must contain exactly one key.
 * A size mismatch empties the entry (u64s = 0) rather than nulling it, so
 * later stages can still tell a root was _supposed_ to be here.  The root
 * key is validated at level 1 (it points at the root node).
 */
static int journal_entry_btree_root_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian,
					     enum bch_validate_flags flags)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s,
				 c, version, jset, entry,
				 journal_entry_btree_root_bad_size,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	/* A deleted root key is not an error for the caller: */
	ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
				   version, big_endian, flags);
	if (ret == FSCK_DELETED_KEY)
		ret = 0;
fsck_err:
	return ret;
}
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
journal_entry_btree_keys_to_text(out, c, entry);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
/* obsolete, don't care: */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_blacklist_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
2022-09-27 04:23:19 +08:00
|
|
|
if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
|
2023-08-06 22:57:25 +08:00
|
|
|
c, version, jset, entry,
|
2023-10-25 08:44:36 +08:00
|
|
|
journal_entry_blacklist_bad_size,
|
2017-03-17 14:18:50 +08:00
|
|
|
"invalid journal seq blacklist entry: bad size")) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
}
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_blacklist *bl =
|
|
|
|
container_of(entry, struct jset_entry_blacklist, entry);
|
|
|
|
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
|
2022-01-01 06:06:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
struct jset_entry_blacklist_v2 *bl_entry;
|
|
|
|
int ret = 0;
|
|
|
|
|
2022-09-27 04:23:19 +08:00
|
|
|
if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
|
2023-08-06 22:57:25 +08:00
|
|
|
c, version, jset, entry,
|
2023-10-25 08:44:36 +08:00
|
|
|
journal_entry_blacklist_v2_bad_size,
|
2017-03-17 14:18:50 +08:00
|
|
|
"invalid journal seq blacklist entry: bad size")) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
2019-01-25 06:12:00 +08:00
|
|
|
goto out;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
|
|
|
|
|
|
|
|
if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
|
2022-09-27 04:23:19 +08:00
|
|
|
le64_to_cpu(bl_entry->end),
|
2023-08-06 22:57:25 +08:00
|
|
|
c, version, jset, entry,
|
2023-10-25 08:44:36 +08:00
|
|
|
journal_entry_blacklist_v2_start_past_end,
|
2017-03-17 14:18:50 +08:00
|
|
|
"invalid journal seq blacklist entry: start > end")) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
}
|
2019-01-25 06:12:00 +08:00
|
|
|
out:
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_blacklist_v2 *bl =
|
|
|
|
container_of(entry, struct jset_entry_blacklist_v2, entry);
|
|
|
|
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(out, "start=%llu end=%llu",
|
2022-01-01 06:06:29 +08:00
|
|
|
le64_to_cpu(bl->start),
|
|
|
|
le64_to_cpu(bl->end));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_usage_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2019-01-25 06:12:00 +08:00
|
|
|
{
|
|
|
|
struct jset_entry_usage *u =
|
|
|
|
container_of(entry, struct jset_entry_usage, entry);
|
|
|
|
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
|
|
|
|
int ret = 0;
|
|
|
|
|
2019-02-10 08:20:57 +08:00
|
|
|
if (journal_entry_err_on(bytes < sizeof(*u),
|
2023-08-06 22:57:25 +08:00
|
|
|
c, version, jset, entry,
|
2023-10-25 08:44:36 +08:00
|
|
|
journal_entry_usage_bad_size,
|
2019-02-10 08:20:57 +08:00
|
|
|
"invalid journal entry usage: bad size")) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_usage *u =
|
|
|
|
container_of(entry, struct jset_entry_usage, entry);
|
|
|
|
|
2024-04-13 03:17:00 +08:00
|
|
|
prt_str(out, "type=");
|
|
|
|
bch2_prt_fs_usage_type(out, u->entry.btree_id);
|
|
|
|
prt_printf(out, " v=%llu", le64_to_cpu(u->v));
|
2022-01-01 06:06:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_data_usage_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2019-02-10 08:20:57 +08:00
|
|
|
{
|
|
|
|
struct jset_entry_data_usage *u =
|
|
|
|
container_of(entry, struct jset_entry_data_usage, entry);
|
|
|
|
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
|
2023-11-26 10:42:08 +08:00
|
|
|
struct printbuf err = PRINTBUF;
|
2019-02-10 08:20:57 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
2019-01-25 06:12:00 +08:00
|
|
|
if (journal_entry_err_on(bytes < sizeof(*u) ||
|
|
|
|
bytes < sizeof(*u) + u->r.nr_devs,
|
2023-08-06 22:57:25 +08:00
|
|
|
c, version, jset, entry,
|
2023-10-25 08:44:36 +08:00
|
|
|
journal_entry_data_usage_bad_size,
|
2019-01-25 06:12:00 +08:00
|
|
|
"invalid journal entry usage: bad size")) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
2023-11-26 10:42:08 +08:00
|
|
|
goto out;
|
2019-01-25 06:12:00 +08:00
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2023-11-26 10:42:08 +08:00
|
|
|
if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
|
|
|
|
c, version, jset, entry,
|
|
|
|
journal_entry_data_usage_bad_size,
|
|
|
|
"invalid journal entry usage: %s", err.buf)) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
out:
|
2017-03-17 14:18:50 +08:00
|
|
|
fsck_err:
|
2023-11-26 10:42:08 +08:00
|
|
|
printbuf_exit(&err);
|
2017-03-17 14:18:50 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_data_usage *u =
|
|
|
|
container_of(entry, struct jset_entry_data_usage, entry);
|
|
|
|
|
|
|
|
bch2_replicas_entry_to_text(out, &u->r);
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(out, "=%llu", le64_to_cpu(u->v));
|
2022-01-01 06:06:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_clock_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2021-01-22 04:28:59 +08:00
|
|
|
{
|
|
|
|
struct jset_entry_clock *clock =
|
|
|
|
container_of(entry, struct jset_entry_clock, entry);
|
|
|
|
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (journal_entry_err_on(bytes != sizeof(*clock),
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, entry,
|
|
|
|
journal_entry_clock_bad_size,
|
|
|
|
"bad size")) {
|
2021-01-22 04:28:59 +08:00
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (journal_entry_err_on(clock->rw > 1,
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, entry,
|
|
|
|
journal_entry_clock_bad_rw,
|
|
|
|
"bad rw")) {
|
2021-01-22 04:28:59 +08:00
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_clock *clock =
|
|
|
|
container_of(entry, struct jset_entry_clock, entry);
|
|
|
|
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
|
2022-01-01 06:06:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_dev_usage_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2021-01-22 10:52:06 +08:00
|
|
|
{
|
|
|
|
struct jset_entry_dev_usage *u =
|
|
|
|
container_of(entry, struct jset_entry_dev_usage, entry);
|
|
|
|
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
|
2021-06-13 05:20:02 +08:00
|
|
|
unsigned expected = sizeof(*u);
|
2021-01-22 10:52:06 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (journal_entry_err_on(bytes < expected,
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, entry,
|
|
|
|
journal_entry_dev_usage_bad_size,
|
|
|
|
"bad size (%u < %u)",
|
2021-01-22 10:52:06 +08:00
|
|
|
bytes, expected)) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (journal_entry_err_on(u->pad,
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, entry,
|
|
|
|
journal_entry_dev_usage_bad_pad,
|
|
|
|
"bad pad")) {
|
2021-01-22 10:52:06 +08:00
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_dev_usage *u =
|
|
|
|
container_of(entry, struct jset_entry_dev_usage, entry);
|
|
|
|
unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
|
|
|
|
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(out, "dev=%u", le32_to_cpu(u->dev));
|
2022-01-01 06:06:29 +08:00
|
|
|
|
|
|
|
for (i = 0; i < nr_types; i++) {
|
2024-01-07 09:57:43 +08:00
|
|
|
bch2_prt_data_type(out, i);
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
|
2022-01-01 06:06:29 +08:00
|
|
|
le64_to_cpu(u->d[i].buckets),
|
|
|
|
le64_to_cpu(u->d[i].sectors),
|
|
|
|
le64_to_cpu(u->d[i].fragmented));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_entry_log_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2022-01-01 05:12:54 +08:00
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
|
|
|
|
unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);
|
|
|
|
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(out, "%.*s", bytes, l->d);
|
2022-01-01 06:06:29 +08:00
|
|
|
}
|
|
|
|
|
2022-09-27 04:23:19 +08:00
|
|
|
static int journal_entry_overwrite_validate(struct bch_fs *c,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2022-06-06 03:32:57 +08:00
|
|
|
{
|
2022-12-21 09:00:34 +08:00
|
|
|
return journal_entry_btree_keys_validate(c, jset, entry,
|
|
|
|
version, big_endian, READ);
|
2022-06-06 03:32:57 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
journal_entry_btree_keys_to_text(out, c, entry);
|
|
|
|
}
|
|
|
|
|
2023-11-03 06:57:19 +08:00
|
|
|
static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
|
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2023-11-03 06:57:19 +08:00
|
|
|
{
|
|
|
|
return journal_entry_btree_keys_validate(c, jset, entry,
|
|
|
|
version, big_endian, READ);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
journal_entry_btree_keys_to_text(out, c, entry);
|
|
|
|
}
|
|
|
|
|
2024-01-27 23:16:15 +08:00
|
|
|
static int journal_entry_datetime_validate(struct bch_fs *c,
|
|
|
|
struct jset *jset,
|
|
|
|
struct jset_entry *entry,
|
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2024-01-27 23:16:15 +08:00
|
|
|
{
|
|
|
|
unsigned bytes = vstruct_bytes(entry);
|
|
|
|
unsigned expected = 16;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (journal_entry_err_on(vstruct_bytes(entry) < expected,
|
|
|
|
c, version, jset, entry,
|
|
|
|
journal_entry_dev_usage_bad_size,
|
|
|
|
"bad size (%u < %u)",
|
|
|
|
bytes, expected)) {
|
|
|
|
journal_entry_null_range(entry, vstruct_next(entry));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void journal_entry_datetime_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
|
|
|
struct jset_entry_datetime *datetime =
|
|
|
|
container_of(entry, struct jset_entry_datetime, entry);
|
|
|
|
|
|
|
|
bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
|
|
|
|
}
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
struct jset_entry_ops {
|
2022-09-27 04:23:19 +08:00
|
|
|
int (*validate)(struct bch_fs *, struct jset *,
|
2023-08-07 00:43:31 +08:00
|
|
|
struct jset_entry *, unsigned, int,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags);
|
2022-01-01 06:06:29 +08:00
|
|
|
void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
|
2017-03-17 14:18:50 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
|
|
|
|
#define x(f, nr) \
|
|
|
|
[BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) { \
|
2022-01-01 06:06:29 +08:00
|
|
|
.validate = journal_entry_##f##_validate, \
|
|
|
|
.to_text = journal_entry_##f##_to_text, \
|
2017-03-17 14:18:50 +08:00
|
|
|
},
|
|
|
|
BCH_JSET_ENTRY_TYPES()
|
|
|
|
#undef x
|
|
|
|
};
|
|
|
|
|
2022-09-27 04:23:19 +08:00
|
|
|
int bch2_journal_entry_validate(struct bch_fs *c,
|
|
|
|
struct jset *jset,
|
2021-03-05 08:06:26 +08:00
|
|
|
struct jset_entry *entry,
|
2023-08-07 00:43:31 +08:00
|
|
|
unsigned version, int big_endian,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2019-01-25 06:12:00 +08:00
|
|
|
return entry->type < BCH_JSET_ENTRY_NR
|
2022-09-27 04:23:19 +08:00
|
|
|
? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
|
2023-08-07 00:43:31 +08:00
|
|
|
version, big_endian, flags)
|
2019-01-25 06:12:00 +08:00
|
|
|
: 0;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
|
|
|
|
struct jset_entry *entry)
|
|
|
|
{
|
2024-04-13 03:17:00 +08:00
|
|
|
bch2_prt_jset_entry_type(out, entry->type);
|
|
|
|
|
2022-01-01 06:06:29 +08:00
|
|
|
if (entry->type < BCH_JSET_ENTRY_NR) {
|
2024-04-13 03:17:00 +08:00
|
|
|
prt_str(out, ": ");
|
2022-01-01 06:06:29 +08:00
|
|
|
bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2023-08-06 22:57:25 +08:00
|
|
|
unsigned version = le32_to_cpu(jset->version);
|
2017-03-17 14:18:50 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
vstruct_for_each(jset, entry) {
|
2023-08-06 22:57:25 +08:00
|
|
|
if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, entry,
|
|
|
|
journal_entry_past_jset_end,
|
2017-03-17 14:18:50 +08:00
|
|
|
"journal entry extends past end of jset")) {
|
|
|
|
jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-09-27 04:23:19 +08:00
|
|
|
ret = bch2_journal_entry_validate(c, jset, entry,
|
2023-08-07 00:43:31 +08:00
|
|
|
version, JSET_BIG_ENDIAN(jset), flags);
|
2017-03-17 14:18:50 +08:00
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int jset_validate(struct bch_fs *c,
|
2020-08-25 03:58:26 +08:00
|
|
|
struct bch_dev *ca,
|
2017-03-17 14:18:50 +08:00
|
|
|
struct jset *jset, u64 sector,
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2018-11-02 03:10:01 +08:00
|
|
|
unsigned version;
|
2017-03-17 14:18:50 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (le64_to_cpu(jset->magic) != jset_magic(c))
|
|
|
|
return JOURNAL_ENTRY_NONE;
|
|
|
|
|
2018-11-02 03:10:01 +08:00
|
|
|
version = le32_to_cpu(jset->version);
|
2023-08-06 22:57:25 +08:00
|
|
|
if (journal_entry_err_on(!bch2_version_compatible(version),
|
|
|
|
c, version, jset, NULL,
|
2023-10-25 08:44:36 +08:00
|
|
|
jset_unsupported_version,
|
2023-06-28 10:09:35 +08:00
|
|
|
"%s sector %llu seq %llu: incompatible journal entry version %u.%u",
|
2021-01-15 05:21:22 +08:00
|
|
|
ca ? ca->name : c->name,
|
2023-06-28 10:09:35 +08:00
|
|
|
sector, le64_to_cpu(jset->seq),
|
|
|
|
BCH_VERSION_MAJOR(version),
|
|
|
|
BCH_VERSION_MINOR(version))) {
|
2020-11-14 03:39:43 +08:00
|
|
|
/* don't try to continue: */
|
2022-10-14 10:52:40 +08:00
|
|
|
return -EINVAL;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2022-09-27 04:23:19 +08:00
|
|
|
if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, NULL,
|
|
|
|
jset_unknown_csum,
|
2020-08-25 03:58:26 +08:00
|
|
|
"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
|
2021-01-15 05:21:22 +08:00
|
|
|
ca ? ca->name : c->name,
|
|
|
|
sector, le64_to_cpu(jset->seq),
|
2022-10-14 10:52:40 +08:00
|
|
|
JSET_CSUM_TYPE(jset)))
|
2020-11-14 03:39:43 +08:00
|
|
|
ret = JOURNAL_ENTRY_BAD;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2021-01-15 05:21:22 +08:00
|
|
|
/* last_seq is ignored when JSET_NO_FLUSH is true */
|
|
|
|
if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
|
2022-09-27 04:23:19 +08:00
|
|
|
le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
|
2023-08-06 22:57:25 +08:00
|
|
|
c, version, jset, NULL,
|
2023-10-25 08:44:36 +08:00
|
|
|
jset_last_seq_newer_than_seq,
|
2021-01-15 05:21:22 +08:00
|
|
|
"invalid journal entry: last_seq > seq (%llu > %llu)",
|
|
|
|
le64_to_cpu(jset->last_seq),
|
|
|
|
le64_to_cpu(jset->seq))) {
|
2017-03-17 14:18:50 +08:00
|
|
|
jset->last_seq = jset->seq;
|
2020-08-25 03:58:26 +08:00
|
|
|
return JOURNAL_ENTRY_BAD;
|
|
|
|
}
|
2022-10-14 10:52:40 +08:00
|
|
|
|
2023-08-07 00:43:31 +08:00
|
|
|
ret = jset_validate_entries(c, jset, flags);
|
2017-03-17 14:18:50 +08:00
|
|
|
fsck_err:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2022-10-14 10:52:40 +08:00
|
|
|
static int jset_validate_early(struct bch_fs *c,
|
|
|
|
struct bch_dev *ca,
|
|
|
|
struct jset *jset, u64 sector,
|
|
|
|
unsigned bucket_sectors_left,
|
|
|
|
unsigned sectors_read)
|
2021-01-15 05:21:22 +08:00
|
|
|
{
|
2022-10-14 10:52:40 +08:00
|
|
|
size_t bytes = vstruct_bytes(jset);
|
|
|
|
unsigned version;
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags = BCH_VALIDATE_journal;
|
2022-10-14 10:52:40 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (le64_to_cpu(jset->magic) != jset_magic(c))
|
|
|
|
return JOURNAL_ENTRY_NONE;
|
|
|
|
|
|
|
|
version = le32_to_cpu(jset->version);
|
2023-08-06 22:57:25 +08:00
|
|
|
if (journal_entry_err_on(!bch2_version_compatible(version),
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, NULL,
|
|
|
|
jset_unsupported_version,
|
2023-06-28 10:09:35 +08:00
|
|
|
"%s sector %llu seq %llu: unknown journal entry version %u.%u",
|
2022-10-14 10:52:40 +08:00
|
|
|
ca ? ca->name : c->name,
|
2023-06-28 10:09:35 +08:00
|
|
|
sector, le64_to_cpu(jset->seq),
|
|
|
|
BCH_VERSION_MAJOR(version),
|
|
|
|
BCH_VERSION_MINOR(version))) {
|
2022-10-14 10:52:40 +08:00
|
|
|
/* don't try to continue: */
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bytes > (sectors_read << 9) &&
|
|
|
|
sectors_read < bucket_sectors_left)
|
|
|
|
return JOURNAL_ENTRY_REREAD;
|
2021-01-15 05:21:22 +08:00
|
|
|
|
2022-10-14 10:52:40 +08:00
|
|
|
if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
|
2023-10-25 08:44:36 +08:00
|
|
|
c, version, jset, NULL,
|
|
|
|
jset_past_bucket_end,
|
2022-10-14 10:52:40 +08:00
|
|
|
"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
|
|
|
|
ca ? ca->name : c->name,
|
|
|
|
sector, le64_to_cpu(jset->seq), bytes))
|
|
|
|
le32_add_cpu(&jset->u64s,
|
|
|
|
-((bytes - (bucket_sectors_left << 9)) / 8));
|
|
|
|
fsck_err:
|
|
|
|
return ret;
|
2021-01-15 05:21:22 +08:00
|
|
|
}
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
struct journal_read_buf {
|
|
|
|
void *data;
|
|
|
|
size_t size;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int journal_read_buf_realloc(struct journal_read_buf *b,
|
|
|
|
size_t new_size)
|
|
|
|
{
|
|
|
|
void *n;
|
|
|
|
|
|
|
|
/* the bios are sized for this many pages, max: */
|
|
|
|
if (new_size > JOURNAL_ENTRY_SIZE_MAX)
|
2023-03-15 03:35:57 +08:00
|
|
|
return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
new_size = roundup_pow_of_two(new_size);
|
2024-02-01 19:35:46 +08:00
|
|
|
n = kvmalloc(new_size, GFP_KERNEL);
|
2017-03-17 14:18:50 +08:00
|
|
|
if (!n)
|
2023-03-15 03:35:57 +08:00
|
|
|
return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-02-01 19:35:46 +08:00
|
|
|
kvfree(b->data);
|
2017-03-17 14:18:50 +08:00
|
|
|
b->data = n;
|
|
|
|
b->size = new_size;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int journal_read_bucket(struct bch_dev *ca,
|
|
|
|
struct journal_read_buf *buf,
|
|
|
|
struct journal_list *jlist,
|
2018-11-19 07:32:16 +08:00
|
|
|
unsigned bucket)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
struct bch_fs *c = ca->fs;
|
|
|
|
struct journal_device *ja = &ca->journal;
|
|
|
|
struct jset *j = NULL;
|
|
|
|
unsigned sectors, sectors_read = 0;
|
|
|
|
u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
|
|
|
|
end = offset + ca->mi.bucket_size;
|
2022-10-14 13:14:15 +08:00
|
|
|
bool saw_bad = false, csum_good;
|
2024-01-06 00:59:03 +08:00
|
|
|
struct printbuf err = PRINTBUF;
|
2017-03-17 14:18:50 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
pr_debug("reading %u", bucket);
|
|
|
|
|
|
|
|
while (offset < end) {
|
|
|
|
if (!sectors_read) {
|
2018-11-04 08:04:54 +08:00
|
|
|
struct bio *bio;
|
|
|
|
unsigned nr_bvecs;
|
|
|
|
reread:
|
|
|
|
sectors_read = min_t(unsigned,
|
2017-03-17 14:18:50 +08:00
|
|
|
end - offset, buf->size >> 9);
|
2018-11-04 08:04:54 +08:00
|
|
|
nr_bvecs = buf_pages(buf->data, sectors_read << 9);
|
|
|
|
|
|
|
|
bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
|
|
|
|
bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2019-07-04 07:27:42 +08:00
|
|
|
bio->bi_iter.bi_sector = offset;
|
|
|
|
bch2_bio_map(bio, buf->data, sectors_read << 9);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
ret = submit_bio_wait(bio);
|
2018-11-04 08:04:54 +08:00
|
|
|
kfree(bio);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2023-10-26 04:29:37 +08:00
|
|
|
if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
|
2020-12-04 02:57:22 +08:00
|
|
|
"journal read error: sector %llu",
|
2017-03-17 14:18:50 +08:00
|
|
|
offset) ||
|
2021-01-07 07:49:35 +08:00
|
|
|
bch2_meta_read_fault("journal")) {
|
|
|
|
/*
|
|
|
|
* We don't error out of the recovery process
|
|
|
|
* here, since the relevant journal entry may be
|
|
|
|
* found on a different device, and missing or
|
|
|
|
* no journal entries will be handled later
|
|
|
|
*/
|
2024-01-06 00:59:03 +08:00
|
|
|
goto out;
|
2021-01-07 07:49:35 +08:00
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
j = buf->data;
|
|
|
|
}
|
|
|
|
|
2022-10-14 10:52:40 +08:00
|
|
|
ret = jset_validate_early(c, ca, j, offset,
|
|
|
|
end - offset, sectors_read);
|
2017-03-17 14:18:50 +08:00
|
|
|
switch (ret) {
|
2022-07-20 05:20:18 +08:00
|
|
|
case 0:
|
2020-08-25 03:58:26 +08:00
|
|
|
sectors = vstruct_sectors(j, c->block_bits);
|
2017-03-17 14:18:50 +08:00
|
|
|
break;
|
|
|
|
case JOURNAL_ENTRY_REREAD:
|
|
|
|
if (vstruct_bytes(j) > buf->size) {
|
|
|
|
ret = journal_read_buf_realloc(buf,
|
|
|
|
vstruct_bytes(j));
|
|
|
|
if (ret)
|
2024-01-06 00:59:03 +08:00
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
goto reread;
|
|
|
|
case JOURNAL_ENTRY_NONE:
|
|
|
|
if (!saw_bad)
|
2024-01-06 00:59:03 +08:00
|
|
|
goto out;
|
2020-08-25 03:58:26 +08:00
|
|
|
/*
|
|
|
|
* On checksum error we don't really trust the size
|
|
|
|
* field of the journal entry we read, so try reading
|
|
|
|
* again at next block boundary:
|
|
|
|
*/
|
2021-12-15 03:24:41 +08:00
|
|
|
sectors = block_sectors(c);
|
2022-10-14 10:52:40 +08:00
|
|
|
goto next_block;
|
2017-03-17 14:18:50 +08:00
|
|
|
default:
|
2024-01-06 00:59:03 +08:00
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2024-04-26 12:31:14 +08:00
|
|
|
if (le64_to_cpu(j->seq) > ja->highest_seq_found) {
|
|
|
|
ja->highest_seq_found = le64_to_cpu(j->seq);
|
|
|
|
ja->cur_idx = bucket;
|
|
|
|
ja->sectors_free = ca->mi.bucket_size -
|
|
|
|
bucket_remainder(ca, offset) - sectors;
|
|
|
|
}
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
/*
|
|
|
|
* This happens sometimes if we don't have discards on -
|
|
|
|
* when we've partially overwritten a bucket with new
|
|
|
|
* journal entries. We don't need the rest of the
|
|
|
|
* bucket:
|
|
|
|
*/
|
|
|
|
if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
|
2024-01-06 00:59:03 +08:00
|
|
|
goto out;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
|
|
|
|
|
2024-01-06 00:59:03 +08:00
|
|
|
enum bch_csum_type csum_type = JSET_CSUM_TYPE(j);
|
|
|
|
struct bch_csum csum;
|
|
|
|
csum_good = jset_csum_good(c, j, &csum);
|
|
|
|
|
2023-10-26 04:29:37 +08:00
|
|
|
if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
|
2024-01-06 00:59:03 +08:00
|
|
|
"%s",
|
|
|
|
(printbuf_reset(&err),
|
|
|
|
prt_str(&err, "journal "),
|
|
|
|
bch2_csum_err_msg(&err, csum_type, j->csum, csum),
|
|
|
|
err.buf)))
|
2022-10-14 13:14:15 +08:00
|
|
|
saw_bad = true;
|
|
|
|
|
2022-10-14 10:52:40 +08:00
|
|
|
ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
|
|
|
|
j->encrypted_start,
|
|
|
|
vstruct_end(j) - (void *) j->encrypted_start);
|
2024-03-18 09:51:19 +08:00
|
|
|
bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret));
|
2022-10-14 10:52:40 +08:00
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
mutex_lock(&jlist->lock);
|
2022-02-19 14:18:18 +08:00
|
|
|
ret = journal_entry_add(c, ca, (struct journal_ptr) {
|
2022-10-14 13:14:15 +08:00
|
|
|
.csum_good = csum_good,
|
2022-02-19 14:18:18 +08:00
|
|
|
.dev = ca->dev_idx,
|
|
|
|
.bucket = bucket,
|
|
|
|
.bucket_offset = offset -
|
|
|
|
bucket_to_sector(ca, ja->buckets[bucket]),
|
|
|
|
.sector = offset,
|
2022-10-14 13:14:15 +08:00
|
|
|
}, jlist, j);
|
2017-03-17 14:18:50 +08:00
|
|
|
mutex_unlock(&jlist->lock);
|
|
|
|
|
|
|
|
switch (ret) {
|
|
|
|
case JOURNAL_ENTRY_ADD_OK:
|
|
|
|
break;
|
|
|
|
case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
|
|
|
|
break;
|
|
|
|
default:
|
2024-01-06 00:59:03 +08:00
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
next_block:
|
|
|
|
pr_debug("next");
|
|
|
|
offset += sectors;
|
|
|
|
sectors_read -= sectors;
|
|
|
|
j = ((void *) j) + (sectors << 9);
|
|
|
|
}
|
|
|
|
|
2024-01-06 00:59:03 +08:00
|
|
|
out:
|
|
|
|
ret = 0;
|
|
|
|
err:
|
|
|
|
printbuf_exit(&err);
|
|
|
|
return ret;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2023-11-18 08:13:27 +08:00
|
|
|
static CLOSURE_CALLBACK(bch2_journal_read_device)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2023-11-18 08:13:27 +08:00
|
|
|
closure_type(ja, struct journal_device, read);
|
2017-03-17 14:18:50 +08:00
|
|
|
struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
|
2022-01-04 13:06:49 +08:00
|
|
|
struct bch_fs *c = ca->fs;
|
2017-03-17 14:18:50 +08:00
|
|
|
struct journal_list *jlist =
|
|
|
|
container_of(cl->parent, struct journal_list, cl);
|
|
|
|
struct journal_read_buf buf = { NULL, 0 };
|
2018-11-19 07:32:16 +08:00
|
|
|
unsigned i;
|
2022-01-05 08:45:39 +08:00
|
|
|
int ret = 0;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
if (!ja->nr)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
pr_debug("%u journal buckets", ja->nr);
|
|
|
|
|
|
|
|
for (i = 0; i < ja->nr; i++) {
|
2018-11-19 07:32:16 +08:00
|
|
|
ret = journal_read_bucket(ca, &buf, jlist, i);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-03-04 04:15:55 +08:00
|
|
|
* Set dirty_idx to indicate the entire journal is full and needs to be
|
2017-03-17 14:18:50 +08:00
|
|
|
* reclaimed - journal reclaim will immediately reclaim whatever isn't
|
|
|
|
* pinned when it first runs:
|
|
|
|
*/
|
2019-03-04 04:15:55 +08:00
|
|
|
ja->discard_idx = ja->dirty_idx_ondisk =
|
|
|
|
ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
|
2017-03-17 14:18:50 +08:00
|
|
|
out:
|
2022-01-04 13:06:49 +08:00
|
|
|
bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
|
2024-02-01 19:35:46 +08:00
|
|
|
kvfree(buf.data);
|
2017-03-17 14:18:50 +08:00
|
|
|
percpu_ref_put(&ca->io_ref);
|
|
|
|
closure_return(cl);
|
|
|
|
return;
|
|
|
|
err:
|
|
|
|
mutex_lock(&jlist->lock);
|
|
|
|
jlist->ret = ret;
|
|
|
|
mutex_unlock(&jlist->lock);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2022-12-14 23:39:04 +08:00
|
|
|
int bch2_journal_read(struct bch_fs *c,
|
|
|
|
u64 *last_seq,
|
|
|
|
u64 *blacklist_seq,
|
|
|
|
u64 *start_seq)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
struct journal_list jlist;
|
2022-03-21 12:15:53 +08:00
|
|
|
struct journal_replay *i, **_i, *prev = NULL;
|
|
|
|
struct genradix_iter radix_iter;
|
2022-02-26 02:18:19 +08:00
|
|
|
struct printbuf buf = PRINTBUF;
|
2022-11-20 10:40:35 +08:00
|
|
|
bool degraded = false, last_write_torn = false;
|
2022-12-14 23:39:04 +08:00
|
|
|
u64 seq;
|
2017-03-17 14:18:50 +08:00
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
closure_init_stack(&jlist.cl);
|
|
|
|
mutex_init(&jlist.lock);
|
2022-04-11 04:26:34 +08:00
|
|
|
jlist.last_seq = 0;
|
2017-03-17 14:18:50 +08:00
|
|
|
jlist.ret = 0;
|
|
|
|
|
2023-12-17 12:47:29 +08:00
|
|
|
for_each_member_device(c, ca) {
|
2022-04-13 08:03:19 +08:00
|
|
|
if (!c->opts.fsck &&
|
2020-07-10 06:28:11 +08:00
|
|
|
!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
|
2017-03-17 14:18:50 +08:00
|
|
|
continue;
|
|
|
|
|
2021-02-21 08:47:58 +08:00
|
|
|
if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
|
|
|
|
ca->mi.state == BCH_MEMBER_STATE_ro) &&
|
2017-03-17 14:18:50 +08:00
|
|
|
percpu_ref_tryget(&ca->io_ref))
|
|
|
|
closure_call(&ca->journal.read,
|
|
|
|
bch2_journal_read_device,
|
|
|
|
system_unbound_wq,
|
|
|
|
&jlist.cl);
|
|
|
|
else
|
|
|
|
degraded = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
closure_sync(&jlist.cl);
|
|
|
|
|
|
|
|
if (jlist.ret)
|
|
|
|
return jlist.ret;
|
|
|
|
|
2022-12-14 23:39:04 +08:00
|
|
|
*last_seq = 0;
|
2022-11-20 10:20:58 +08:00
|
|
|
*start_seq = 0;
|
|
|
|
*blacklist_seq = 0;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Find most recent flush entry, and ignore newer non flush entries -
|
|
|
|
* those entries will be blacklisted:
|
|
|
|
*/
|
2022-03-21 12:15:53 +08:00
|
|
|
genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
|
2024-05-09 06:40:42 +08:00
|
|
|
enum bch_validate_flags flags = BCH_VALIDATE_journal;
|
2022-11-20 10:40:35 +08:00
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
i = *_i;
|
|
|
|
|
2024-02-26 07:48:21 +08:00
|
|
|
if (journal_replay_ignore(i))
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
continue;
|
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
if (!*start_seq)
|
2022-11-20 10:20:58 +08:00
|
|
|
*blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;
|
2022-03-21 12:15:53 +08:00
|
|
|
|
2022-11-20 10:40:35 +08:00
|
|
|
if (JSET_NO_FLUSH(&i->j)) {
|
2024-02-26 07:48:21 +08:00
|
|
|
i->ignore_blacklisted = true;
|
2022-11-20 10:40:35 +08:00
|
|
|
continue;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
}
|
|
|
|
|
2022-11-20 10:40:35 +08:00
|
|
|
if (!last_write_torn && !i->csum_good) {
|
|
|
|
last_write_torn = true;
|
2024-02-26 07:48:21 +08:00
|
|
|
i->ignore_blacklisted = true;
|
2022-11-20 10:40:35 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
|
2023-08-06 22:57:25 +08:00
|
|
|
c, le32_to_cpu(i->j.version), &i->j, NULL,
|
2023-10-25 08:44:36 +08:00
|
|
|
jset_last_seq_newer_than_seq,
|
2022-11-20 10:40:35 +08:00
|
|
|
"invalid journal entry: last_seq > seq (%llu > %llu)",
|
|
|
|
le64_to_cpu(i->j.last_seq),
|
|
|
|
le64_to_cpu(i->j.seq)))
|
|
|
|
i->j.last_seq = i->j.seq;
|
|
|
|
|
2022-12-14 23:39:04 +08:00
|
|
|
*last_seq = le64_to_cpu(i->j.last_seq);
|
2022-11-20 10:40:35 +08:00
|
|
|
*blacklist_seq = le64_to_cpu(i->j.seq) + 1;
|
|
|
|
break;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
}
|
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
if (!*start_seq) {
|
|
|
|
bch_info(c, "journal read done, but no entries found");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-12-14 23:39:04 +08:00
|
|
|
if (!*last_seq) {
|
2023-10-25 08:44:36 +08:00
|
|
|
fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
|
|
|
|
"journal read done, but no entries found after dropping non-flushes");
|
2022-12-02 00:17:18 +08:00
|
|
|
return 0;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
}
|
|
|
|
|
2022-11-20 10:20:58 +08:00
|
|
|
bch_info(c, "journal read done, replaying entries %llu-%llu",
|
2022-12-14 23:39:04 +08:00
|
|
|
*last_seq, *blacklist_seq - 1);
|
2022-11-20 10:20:58 +08:00
|
|
|
|
|
|
|
if (*start_seq != *blacklist_seq)
|
|
|
|
bch_info(c, "dropped unflushed entries %llu-%llu",
|
|
|
|
*blacklist_seq, *start_seq - 1);
|
|
|
|
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
/* Drop blacklisted entries and entries older than last_seq: */
|
2022-03-21 12:15:53 +08:00
|
|
|
genradix_for_each(&c->journal_entries, radix_iter, _i) {
|
|
|
|
i = *_i;
|
|
|
|
|
2024-02-26 07:48:21 +08:00
|
|
|
if (journal_replay_ignore(i))
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
seq = le64_to_cpu(i->j.seq);
|
2022-12-14 23:39:04 +08:00
|
|
|
if (seq < *last_seq) {
|
2024-02-26 07:48:21 +08:00
|
|
|
journal_replay_free(c, i, false);
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
|
|
|
|
fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
|
2023-10-25 08:44:36 +08:00
|
|
|
jset_seq_blacklisted,
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
"found blacklisted journal entry %llu", seq);
|
2024-02-26 07:48:21 +08:00
|
|
|
i->ignore_blacklisted = true;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for missing entries: */
|
2022-12-14 23:39:04 +08:00
|
|
|
seq = *last_seq;
|
2022-03-21 12:15:53 +08:00
|
|
|
genradix_for_each(&c->journal_entries, radix_iter, _i) {
|
|
|
|
i = *_i;
|
|
|
|
|
2024-02-26 07:48:21 +08:00
|
|
|
if (journal_replay_ignore(i))
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
BUG_ON(seq > le64_to_cpu(i->j.seq));
|
|
|
|
|
|
|
|
while (seq < le64_to_cpu(i->j.seq)) {
|
|
|
|
u64 missing_start, missing_end;
|
2022-02-26 02:18:19 +08:00
|
|
|
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
|
|
|
|
while (seq < le64_to_cpu(i->j.seq) &&
|
|
|
|
bch2_journal_seq_is_blacklisted(c, seq, false))
|
|
|
|
seq++;
|
|
|
|
|
|
|
|
if (seq == le64_to_cpu(i->j.seq))
|
|
|
|
break;
|
|
|
|
|
|
|
|
missing_start = seq;
|
|
|
|
|
|
|
|
while (seq < le64_to_cpu(i->j.seq) &&
|
|
|
|
!bch2_journal_seq_is_blacklisted(c, seq, false))
|
|
|
|
seq++;
|
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
if (prev) {
|
|
|
|
bch2_journal_ptrs_to_text(&buf1, c, prev);
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
|
2021-01-27 05:04:12 +08:00
|
|
|
} else
|
2023-02-04 10:01:40 +08:00
|
|
|
prt_printf(&buf1, "(none)");
|
2022-02-26 02:18:19 +08:00
|
|
|
bch2_journal_ptrs_to_text(&buf2, c, i);
|
2021-01-27 05:04:12 +08:00
|
|
|
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
missing_end = seq - 1;
|
2023-10-25 08:44:36 +08:00
|
|
|
fsck_err(c, journal_entries_missing,
|
|
|
|
"journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
|
2021-01-27 05:04:12 +08:00
|
|
|
" prev at %s\n"
|
2024-03-28 12:39:11 +08:00
|
|
|
" next at %s, continue?",
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
missing_start, missing_end,
|
2022-12-14 23:39:04 +08:00
|
|
|
*last_seq, *blacklist_seq - 1,
|
2022-02-26 02:18:19 +08:00
|
|
|
buf1.buf, buf2.buf);
|
|
|
|
|
|
|
|
printbuf_exit(&buf1);
|
|
|
|
printbuf_exit(&buf2);
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
}
|
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
prev = i;
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
seq++;
|
|
|
|
}
|
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
genradix_for_each(&c->journal_entries, radix_iter, _i) {
|
2021-01-27 05:04:12 +08:00
|
|
|
struct bch_replicas_padded replicas = {
|
|
|
|
.e.data_type = BCH_DATA_journal,
|
|
|
|
.e.nr_required = 1,
|
|
|
|
};
|
2019-01-22 04:32:13 +08:00
|
|
|
|
2022-03-21 12:15:53 +08:00
|
|
|
i = *_i;
|
2024-02-26 07:48:21 +08:00
|
|
|
if (journal_replay_ignore(i))
|
bcachefs: Don't require flush/fua on every journal write
This patch adds a flag to journal entries which, if set, indicates that
they weren't done as flush/fua writes.
- non flush/fua journal writes don't update last_seq (i.e. they don't
free up space in the journal), thus the journal free space
calculations now check whether nonflush journal writes are currently
allowed (i.e. are we low on free space, or would doing a flush write
free up a lot of space in the journal)
- write_delay_ms, the user configurable option for when open journal
entries are automatically written, is now interpreted as the max
delay between flush journal writes (default 1 second).
- bch2_journal_flush_seq_async is changed to ensure a flush write >=
the requested sequence number has happened
- journal read/replay must now ignore, and blacklist, any journal
entries newer than the most recent flush entry in the journal. Also,
the way the read_entire_journal option is handled has been improved;
struct journal_replay now has an entry, 'ignore', for entries that
were read but should not be used.
- assorted refactoring and improvements related to journal read in
journal_io.c and recovery.c
Previously, we'd have to issue a flush/fua write every time we
accumulated a full journal entry - typically the bucket size. Now we
need to issue them much less frequently: when an fsync is requested, or
it's been more than write_delay_ms since the last flush, or when we need
to free up space in the journal. This is a significant performance
improvement on many write heavy workloads.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2020-11-14 22:59:58 +08:00
|
|
|
continue;
|
|
|
|
|
2024-01-27 13:05:03 +08:00
|
|
|
darray_for_each(i->ptrs, ptr) {
|
2024-05-01 15:59:45 +08:00
|
|
|
struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
|
2022-10-14 10:52:40 +08:00
|
|
|
|
2024-01-27 13:05:03 +08:00
|
|
|
if (!ptr->csum_good)
|
|
|
|
bch_err_dev_offset(ca, ptr->sector,
|
2022-11-16 09:25:08 +08:00
|
|
|
"invalid journal checksum, seq %llu%s",
|
|
|
|
le64_to_cpu(i->j.seq),
|
|
|
|
i->csum_good ? " (had good copy on another device)" : "");
|
2022-10-14 10:52:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = jset_validate(c,
|
2024-05-01 15:59:45 +08:00
|
|
|
bch2_dev_have_ref(c, i->ptrs.data[0].dev),
|
2022-10-14 10:52:40 +08:00
|
|
|
&i->j,
|
2024-01-27 13:05:03 +08:00
|
|
|
i->ptrs.data[0].sector,
|
2022-10-14 10:52:40 +08:00
|
|
|
READ);
|
2017-03-17 14:18:50 +08:00
|
|
|
if (ret)
|
2022-02-26 02:18:19 +08:00
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-01-27 13:05:03 +08:00
|
|
|
darray_for_each(i->ptrs, ptr)
|
|
|
|
replicas.e.devs[replicas.e.nr_devs++] = ptr->dev;
|
2021-01-27 05:04:12 +08:00
|
|
|
|
2021-02-03 04:56:44 +08:00
|
|
|
bch2_replicas_entry_sort(&replicas.e);
|
|
|
|
|
2022-02-26 02:18:19 +08:00
|
|
|
printbuf_reset(&buf);
|
|
|
|
bch2_replicas_entry_to_text(&buf, &replicas.e);
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
if (!degraded &&
|
2023-08-12 07:30:38 +08:00
|
|
|
!bch2_replicas_marked(c, &replicas.e) &&
|
|
|
|
(le64_to_cpu(i->j.seq) == *last_seq ||
|
2023-10-25 08:44:36 +08:00
|
|
|
fsck_err(c, journal_entry_replicas_not_marked,
|
|
|
|
"superblock not marked as containing replicas for journal entry %llu\n %s",
|
2023-08-12 07:30:38 +08:00
|
|
|
le64_to_cpu(i->j.seq), buf.buf))) {
|
2019-01-22 04:32:13 +08:00
|
|
|
ret = bch2_mark_replicas(c, &replicas.e);
|
2017-03-17 14:18:50 +08:00
|
|
|
if (ret)
|
2022-02-26 02:18:19 +08:00
|
|
|
goto err;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
}
|
2022-02-26 02:18:19 +08:00
|
|
|
err:
|
2017-03-17 14:18:50 +08:00
|
|
|
fsck_err:
|
2022-02-26 02:18:19 +08:00
|
|
|
printbuf_exit(&buf);
|
2017-03-17 14:18:50 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* journal write: */
|
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
static void __journal_write_alloc(struct journal *j,
|
|
|
|
struct journal_buf *w,
|
|
|
|
struct dev_alloc_list *devs_sorted,
|
|
|
|
unsigned sectors,
|
|
|
|
unsigned *replicas,
|
|
|
|
unsigned replicas_want)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
|
|
|
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
|
|
|
struct journal_device *ja;
|
|
|
|
struct bch_dev *ca;
|
2018-11-19 07:32:16 +08:00
|
|
|
unsigned i;
|
2018-10-01 06:28:23 +08:00
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
if (*replicas >= replicas_want)
|
|
|
|
return;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
for (i = 0; i < devs_sorted->nr; i++) {
|
|
|
|
ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
|
2017-03-17 14:18:50 +08:00
|
|
|
if (!ca)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ja = &ca->journal;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check that we can use this device, and aren't already using
|
|
|
|
* it:
|
|
|
|
*/
|
2018-11-19 07:32:16 +08:00
|
|
|
if (!ca->mi.durability ||
|
2021-02-21 08:47:58 +08:00
|
|
|
ca->mi.state != BCH_MEMBER_STATE_rw ||
|
2018-11-19 07:32:16 +08:00
|
|
|
!ja->nr ||
|
2023-03-11 05:28:37 +08:00
|
|
|
bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
|
2018-11-19 07:32:16 +08:00
|
|
|
sectors > ja->sectors_free)
|
2017-03-17 14:18:50 +08:00
|
|
|
continue;
|
|
|
|
|
2020-07-23 01:27:00 +08:00
|
|
|
bch2_dev_stripe_increment(ca, &j->wp.stripe);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2018-11-02 03:10:01 +08:00
|
|
|
bch2_bkey_append_ptr(&w->key,
|
2017-03-17 14:18:50 +08:00
|
|
|
(struct bch_extent_ptr) {
|
|
|
|
.offset = bucket_to_sector(ca,
|
2018-11-19 07:32:16 +08:00
|
|
|
ja->buckets[ja->cur_idx]) +
|
|
|
|
ca->mi.bucket_size -
|
|
|
|
ja->sectors_free,
|
2017-03-17 14:18:50 +08:00
|
|
|
.dev = ca->dev_idx,
|
|
|
|
});
|
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
ja->sectors_free -= sectors;
|
|
|
|
ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
|
|
|
|
|
|
|
|
*replicas += ca->mi.durability;
|
|
|
|
|
|
|
|
if (*replicas >= replicas_want)
|
|
|
|
break;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
2018-11-19 07:32:16 +08:00
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
/**
|
2023-09-13 06:41:22 +08:00
|
|
|
* journal_write_alloc - decide where to write next journal entry
|
|
|
|
*
|
|
|
|
* @j: journal object
|
|
|
|
* @w: journal buf (entry to be written)
|
|
|
|
*
|
|
|
|
* Returns: 0 on success, or -EROFS on failure
|
2018-11-19 07:32:16 +08:00
|
|
|
*/
|
2023-09-13 06:41:22 +08:00
|
|
|
static int journal_write_alloc(struct journal *j, struct journal_buf *w)
|
2018-11-19 07:32:16 +08:00
|
|
|
{
|
|
|
|
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
2021-01-30 04:37:28 +08:00
|
|
|
struct bch_devs_mask devs;
|
2018-11-19 07:32:16 +08:00
|
|
|
struct journal_device *ja;
|
|
|
|
struct bch_dev *ca;
|
|
|
|
struct dev_alloc_list devs_sorted;
|
2023-09-13 06:41:22 +08:00
|
|
|
unsigned sectors = vstruct_sectors(w->data, c->block_bits);
|
2021-01-30 04:37:28 +08:00
|
|
|
unsigned target = c->opts.metadata_target ?:
|
|
|
|
c->opts.foreground_target;
|
2018-11-19 07:32:16 +08:00
|
|
|
unsigned i, replicas = 0, replicas_want =
|
|
|
|
READ_ONCE(c->opts.metadata_replicas);
|
2024-02-11 10:01:40 +08:00
|
|
|
unsigned replicas_need = min_t(unsigned, replicas_want,
|
|
|
|
READ_ONCE(c->opts.metadata_replicas_required));
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
rcu_read_lock();
|
2021-01-30 04:37:28 +08:00
|
|
|
retry:
|
|
|
|
devs = target_rw_devs(c, BCH_DATA_journal, target);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2021-01-30 04:37:28 +08:00
|
|
|
devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
__journal_write_alloc(j, w, &devs_sorted,
|
|
|
|
sectors, &replicas, replicas_want);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2018-11-19 07:32:16 +08:00
|
|
|
if (replicas >= replicas_want)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
for (i = 0; i < devs_sorted.nr; i++) {
|
|
|
|
ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
|
|
|
|
if (!ca)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ja = &ca->journal;
|
|
|
|
|
|
|
|
if (sectors > ja->sectors_free &&
|
|
|
|
sectors <= ca->mi.bucket_size &&
|
2019-03-04 05:50:40 +08:00
|
|
|
bch2_journal_dev_buckets_available(j, ja,
|
|
|
|
journal_space_discarded)) {
|
2018-11-19 07:32:16 +08:00
|
|
|
ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
|
|
|
|
ja->sectors_free = ca->mi.bucket_size;
|
2019-02-20 02:41:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* ja->bucket_seq[ja->cur_idx] must always have
|
|
|
|
* something sensible:
|
|
|
|
*/
|
|
|
|
ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
|
2018-11-19 07:32:16 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
__journal_write_alloc(j, w, &devs_sorted,
|
|
|
|
sectors, &replicas, replicas_want);
|
2021-01-30 04:37:28 +08:00
|
|
|
|
|
|
|
if (replicas < replicas_want && target) {
|
|
|
|
/* Retry from all devices: */
|
|
|
|
target = 0;
|
|
|
|
goto retry;
|
|
|
|
}
|
2018-11-19 07:32:16 +08:00
|
|
|
done:
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2020-12-18 04:08:58 +08:00
|
|
|
BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
|
|
|
|
|
2024-02-11 10:01:40 +08:00
|
|
|
return replicas >= replicas_need ? 0 : -EROFS;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
|
|
|
|
{
|
2023-11-03 06:57:19 +08:00
|
|
|
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
/* we aren't holding j->lock: */
|
|
|
|
unsigned new_size = READ_ONCE(j->buf_size_want);
|
|
|
|
void *new_buf;
|
|
|
|
|
2019-02-19 06:39:42 +08:00
|
|
|
if (buf->buf_size >= new_size)
|
2017-03-17 14:18:50 +08:00
|
|
|
return;
|
|
|
|
|
2023-11-03 06:57:19 +08:00
|
|
|
size_t btree_write_buffer_size = new_size / 64;
|
|
|
|
|
|
|
|
if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
|
|
|
|
return;
|
|
|
|
|
2024-02-01 19:35:46 +08:00
|
|
|
new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
|
2017-03-17 14:18:50 +08:00
|
|
|
if (!new_buf)
|
|
|
|
return;
|
|
|
|
|
2019-02-19 06:39:42 +08:00
|
|
|
memcpy(new_buf, buf->data, buf->buf_size);
|
2021-01-05 04:46:57 +08:00
|
|
|
|
|
|
|
spin_lock(&j->lock);
|
|
|
|
swap(buf->data, new_buf);
|
|
|
|
swap(buf->buf_size, new_size);
|
|
|
|
spin_unlock(&j->lock);
|
|
|
|
|
2024-02-01 19:35:46 +08:00
|
|
|
kvfree(new_buf);
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
2020-11-14 07:36:33 +08:00
|
|
|
static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
|
|
|
|
{
|
2022-03-01 08:17:27 +08:00
|
|
|
return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
|
2020-11-14 07:36:33 +08:00
|
|
|
}
|
|
|
|
|
2023-11-18 08:13:27 +08:00
|
|
|
static CLOSURE_CALLBACK(journal_write_done)
|
2017-03-17 14:18:50 +08:00
|
|
|
{
|
2024-02-01 02:42:48 +08:00
|
|
|
closure_type(w, struct journal_buf, io);
|
|
|
|
struct journal *j = container_of(w, struct journal, buf[w->idx]);
|
2017-03-17 14:18:50 +08:00
|
|
|
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
2023-08-12 07:30:38 +08:00
|
|
|
struct bch_replicas_padded replicas;
|
2020-11-14 07:36:33 +08:00
|
|
|
union journal_res_state old, new;
|
2024-02-01 03:26:15 +08:00
|
|
|
u64 v, seq = le64_to_cpu(w->data->seq);
|
2020-11-15 05:04:30 +08:00
|
|
|
int err = 0;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2021-12-11 04:41:38 +08:00
|
|
|
bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
|
|
|
|
? j->flush_write_time
|
|
|
|
: j->noflush_write_time, j->write_start_time);
|
2018-07-23 19:52:00 +08:00
|
|
|
|
2021-05-28 11:16:25 +08:00
|
|
|
if (!w->devs_written.nr) {
|
2017-03-17 14:18:50 +08:00
|
|
|
bch_err(c, "unable to write journal to sufficient devices");
|
2020-11-15 05:04:30 +08:00
|
|
|
err = -EIO;
|
2023-08-12 07:30:38 +08:00
|
|
|
} else {
|
|
|
|
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
|
|
|
|
w->devs_written);
|
|
|
|
if (bch2_mark_replicas(c, &replicas.e))
|
|
|
|
err = -EIO;
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
2023-08-12 07:30:38 +08:00
|
|
|
|
2020-11-15 05:04:30 +08:00
|
|
|
if (err)
|
|
|
|
bch2_fatal_error(c);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
closure_debug_destroy(cl);
|
2021-01-15 05:21:22 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
spin_lock(&j->lock);
|
2017-03-17 14:18:50 +08:00
|
|
|
if (seq >= j->pin.front)
|
2021-05-28 11:16:25 +08:00
|
|
|
journal_seq_pin(j, seq)->devs = w->devs_written;
|
2024-02-01 03:26:15 +08:00
|
|
|
if (err && (!j->err_seq || seq < j->err_seq))
|
|
|
|
j->err_seq = seq;
|
|
|
|
w->write_done = true;
|
|
|
|
|
|
|
|
bool completed = false;
|
|
|
|
|
|
|
|
for (seq = journal_last_unwritten_seq(j);
|
|
|
|
seq <= journal_cur_seq(j);
|
|
|
|
seq++) {
|
|
|
|
w = j->buf + (seq & JOURNAL_BUF_MASK);
|
|
|
|
if (!w->write_done)
|
|
|
|
break;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
if (!j->err_seq && !JSET_NO_FLUSH(w->data)) {
|
2021-11-16 06:30:11 +08:00
|
|
|
j->flushed_seq_ondisk = seq;
|
|
|
|
j->last_seq_ondisk = w->last_seq;
|
2022-01-10 09:48:31 +08:00
|
|
|
|
2022-02-10 17:32:19 +08:00
|
|
|
bch2_do_discards(c);
|
2022-01-10 09:48:31 +08:00
|
|
|
closure_wake_up(&c->freelist_wait);
|
|
|
|
bch2_reset_alloc_cursors(c);
|
2021-11-16 06:30:11 +08:00
|
|
|
}
|
2019-03-04 04:15:55 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
j->seq_ondisk = seq;
|
2022-03-01 05:35:42 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
/*
|
|
|
|
* Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
|
|
|
|
* more buckets:
|
|
|
|
*
|
|
|
|
* Must come before signaling write completion, for
|
|
|
|
* bch2_fs_journal_stop():
|
|
|
|
*/
|
|
|
|
if (j->watermark != BCH_WATERMARK_stripe)
|
|
|
|
journal_reclaim_kick(&c->journal);
|
2020-11-15 05:04:30 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
v = atomic64_read(&j->reservations.counter);
|
|
|
|
do {
|
|
|
|
old.v = new.v = v;
|
|
|
|
BUG_ON(journal_state_count(new, new.unwritten_idx));
|
|
|
|
BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
new.unwritten_idx++;
|
|
|
|
} while ((v = atomic64_cmpxchg(&j->reservations.counter, old.v, new.v)) != old.v);
|
2020-11-14 07:36:33 +08:00
|
|
|
|
2024-03-02 01:49:24 +08:00
|
|
|
closure_wake_up(&w->wait);
|
2024-02-01 03:26:15 +08:00
|
|
|
completed = true;
|
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
if (completed) {
|
|
|
|
bch2_journal_reclaim_fast(j);
|
|
|
|
bch2_journal_space_available(j);
|
2020-12-06 05:25:05 +08:00
|
|
|
|
2024-03-14 08:16:40 +08:00
|
|
|
track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], false);
|
2023-11-10 11:07:42 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
journal_wake(j);
|
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2024-02-01 03:26:15 +08:00
|
|
|
if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
|
2022-03-01 08:29:19 +08:00
|
|
|
new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
|
2022-02-25 23:28:20 +08:00
|
|
|
struct journal_buf *buf = journal_cur_buf(j);
|
|
|
|
long delta = buf->expires - jiffies;
|
2020-11-14 07:36:33 +08:00
|
|
|
|
2022-03-01 08:29:19 +08:00
|
|
|
/*
|
|
|
|
* We don't close a journal entry to write it while there's
|
|
|
|
* previous entries still in flight - the current journal entry
|
|
|
|
* might want to be written now:
|
|
|
|
*/
|
2024-02-01 00:21:46 +08:00
|
|
|
mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
|
2022-03-01 08:29:19 +08:00
|
|
|
}
|
2024-02-01 03:26:15 +08:00
|
|
|
|
2024-06-24 00:55:16 +08:00
|
|
|
/*
|
|
|
|
* We don't typically trigger journal writes from her - the next journal
|
|
|
|
* write will be triggered immediately after the previous one is
|
|
|
|
* allocated, in bch2_journal_write() - but the journal write error path
|
|
|
|
* is special:
|
|
|
|
*/
|
|
|
|
bch2_journal_do_writes(j);
|
2024-02-01 03:26:15 +08:00
|
|
|
spin_unlock(&j->lock);
|
2017-03-17 14:18:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void journal_write_endio(struct bio *bio)
|
|
|
|
{
|
2024-02-01 02:42:48 +08:00
|
|
|
struct journal_bio *jbio = container_of(bio, struct journal_bio, bio);
|
|
|
|
struct bch_dev *ca = jbio->ca;
|
2017-03-17 14:18:50 +08:00
|
|
|
struct journal *j = &ca->fs->journal;
|
2024-02-01 02:42:48 +08:00
|
|
|
struct journal_buf *w = j->buf + jbio->buf_idx;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2023-10-26 04:29:37 +08:00
|
|
|
if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
|
|
|
|
"error writing journal entry %llu: %s",
|
2021-05-28 11:16:25 +08:00
|
|
|
le64_to_cpu(w->data->seq),
|
2020-07-22 01:34:22 +08:00
|
|
|
bch2_blk_status_to_str(bio->bi_status)) ||
|
2017-03-17 14:18:50 +08:00
|
|
|
bch2_meta_write_fault("journal")) {
|
2024-02-01 02:42:48 +08:00
|
|
|
unsigned long flags;
|
|
|
|
|
2017-03-17 14:18:50 +08:00
|
|
|
spin_lock_irqsave(&j->err_lock, flags);
|
2021-05-28 11:16:25 +08:00
|
|
|
bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
|
2017-03-17 14:18:50 +08:00
|
|
|
spin_unlock_irqrestore(&j->err_lock, flags);
|
|
|
|
}
|
|
|
|
|
2024-02-01 02:42:48 +08:00
|
|
|
closure_put(&w->io);
|
2017-03-17 14:18:50 +08:00
|
|
|
percpu_ref_put(&ca->io_ref);
|
|
|
|
}
|
|
|
|
|
2024-04-20 09:54:32 +08:00
|
|
|
static CLOSURE_CALLBACK(journal_write_submit)
|
2021-01-17 04:40:33 +08:00
|
|
|
{
|
2024-02-01 02:42:48 +08:00
|
|
|
closure_type(w, struct journal_buf, io);
|
|
|
|
struct journal *j = container_of(w, struct journal, buf[w->idx]);
|
2021-01-17 04:40:33 +08:00
|
|
|
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
|
|
|
unsigned sectors = vstruct_sectors(w->data, c->block_bits);
|
|
|
|
|
|
|
|
extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
|
2024-05-01 03:37:51 +08:00
|
|
|
struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
|
2024-05-04 00:54:25 +08:00
|
|
|
if (!ca) {
|
2021-01-17 04:40:33 +08:00
|
|
|
/* XXX: fix this */
|
|
|
|
bch_err(c, "missing device for journal write\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
|
|
|
|
sectors);
|
|
|
|
|
2024-05-04 00:54:25 +08:00
|
|
|
struct journal_device *ja = &ca->journal;
|
2024-02-01 02:42:48 +08:00
|
|
|
struct bio *bio = &ja->bio[w->idx]->bio;
|
2021-01-17 04:40:33 +08:00
|
|
|
bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
|
|
|
|
bio->bi_iter.bi_sector = ptr->offset;
|
|
|
|
bio->bi_end_io = journal_write_endio;
|
|
|
|
bio->bi_private = ca;
|
|
|
|
|
2021-01-30 02:58:10 +08:00
|
|
|
BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
|
|
|
|
ca->prev_journal_sector = bio->bi_iter.bi_sector;
|
|
|
|
|
2021-01-17 04:40:33 +08:00
|
|
|
if (!JSET_NO_FLUSH(w->data))
|
|
|
|
bio->bi_opf |= REQ_FUA;
|
|
|
|
if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
|
|
|
|
bio->bi_opf |= REQ_PREFLUSH;
|
|
|
|
|
|
|
|
bch2_bio_map(bio, w->data, sectors << 9);
|
|
|
|
|
2022-08-28 00:48:36 +08:00
|
|
|
trace_and_count(c, journal_write, bio);
|
2021-01-17 04:40:33 +08:00
|
|
|
closure_bio_submit(bio, cl);
|
|
|
|
|
2024-02-01 02:20:28 +08:00
|
|
|
ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
|
2021-01-17 04:40:33 +08:00
|
|
|
}
|
|
|
|
|
2024-02-01 00:21:46 +08:00
|
|
|
continue_at(cl, journal_write_done, j->wq);
|
2021-01-17 04:40:33 +08:00
|
|
|
}
|
|
|
|
|
2024-04-20 09:54:32 +08:00
|
|
|
static CLOSURE_CALLBACK(journal_write_preflush)
|
|
|
|
{
|
|
|
|
closure_type(w, struct journal_buf, io);
|
|
|
|
struct journal *j = container_of(w, struct journal, buf[w->idx]);
|
|
|
|
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
|
|
|
|
|
|
|
if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
|
|
|
|
spin_lock(&j->lock);
|
|
|
|
closure_wait(&j->async_wait, cl);
|
|
|
|
spin_unlock(&j->lock);
|
|
|
|
|
|
|
|
continue_at(cl, journal_write_preflush, j->wq);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (w->separate_flush) {
|
|
|
|
for_each_rw_member(c, ca) {
|
|
|
|
percpu_ref_get(&ca->io_ref);
|
|
|
|
|
|
|
|
struct journal_device *ja = &ca->journal;
|
|
|
|
struct bio *bio = &ja->bio[w->idx]->bio;
|
|
|
|
bio_reset(bio, ca->disk_sb.bdev,
|
|
|
|
REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
|
|
|
|
bio->bi_end_io = journal_write_endio;
|
|
|
|
bio->bi_private = ca;
|
|
|
|
closure_bio_submit(bio, cl);
|
|
|
|
}
|
|
|
|
|
|
|
|
continue_at(cl, journal_write_submit, j->wq);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* no need to punt to another work item if we're not waiting on
|
|
|
|
* preflushes
|
|
|
|
*/
|
|
|
|
journal_write_submit(&cl->work);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-03 09:43:26 +08:00
|
|
|
static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
|
2023-02-19 13:49:51 +08:00
|
|
|
{
|
2023-11-03 09:43:26 +08:00
|
|
|
struct bch_fs *c = container_of(j, struct bch_fs, journal);
|
2023-12-17 15:19:23 +08:00
|
|
|
struct jset_entry *start, *end;
|
2023-11-03 09:43:26 +08:00
|
|
|
struct jset *jset = w->data;
|
2023-11-03 06:57:19 +08:00
|
|
|
struct journal_keys_to_wb wb = { NULL };
|
2023-11-03 09:43:26 +08:00
|
|
|
unsigned sectors, bytes, u64s;
|
|
|
|
unsigned long btree_roots_have = 0;
|
2023-11-03 06:57:19 +08:00
|
|
|
bool validate_before_checksum = false;
|
|
|
|
u64 seq = le64_to_cpu(jset->seq);
|
2023-11-03 09:43:26 +08:00
|
|
|
int ret;
|
2023-02-19 13:49:51 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Simple compaction, dropping empty jset_entries (from journal
|
|
|
|
* reservations that weren't fully used) and merging jset_entries that
|
|
|
|
* can be.
|
|
|
|
*
|
|
|
|
* If we wanted to be really fancy here, we could sort all the keys in
|
|
|
|
* the jset and drop keys that were overwritten - probably not worth it:
|
|
|
|
*/
|
2023-12-11 15:13:33 +08:00
|
|
|
vstruct_for_each(jset, i) {
|
2023-02-19 13:49:51 +08:00
|
|
|
unsigned u64s = le16_to_cpu(i->u64s);
|
|
|
|
|
|
|
|
/* Empty entry: */
|
|
|
|
if (!u64s)
|
|
|
|
continue;
|
|
|
|
|
2023-11-03 09:43:26 +08:00
|
|
|
/*
|
|
|
|
* New btree roots are set by journalling them; when the journal
|
|
|
|
* entry gets written we have to propagate them to
|
|
|
|
* c->btree_roots
|
|
|
|
*
|
|
|
|
* But, every journal entry we write has to contain all the
|
|
|
|
* btree roots (at least for now); so after we copy btree roots
|
|
|
|
* to c->btree_roots we have to get any missing btree roots and
|
|
|
|
* add them to this journal entry:
|
|
|
|
*/
|
2023-11-03 06:57:19 +08:00
|
|
|
switch (i->type) {
|
|
|
|
case BCH_JSET_ENTRY_btree_root:
|
2023-02-19 13:49:51 +08:00
|
|
|
bch2_journal_entry_to_btree_root(c, i);
|
2023-11-03 09:43:26 +08:00
|
|
|
__set_bit(i->btree_id, &btree_roots_have);
|
2023-11-03 06:57:19 +08:00
|
|
|
break;
|
|
|
|
case BCH_JSET_ENTRY_write_buffer_keys:
|
|
|
|
EBUG_ON(!w->need_flush_to_write_buffer);
|
|
|
|
|
|
|
|
if (!wb.wb)
|
|
|
|
bch2_journal_keys_to_write_buffer_start(c, &wb, seq);
|
|
|
|
|
|
|
|
jset_entry_for_each_key(i, k) {
|
|
|
|
ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
|
|
|
|
if (ret) {
|
2024-03-18 09:51:19 +08:00
|
|
|
bch2_fs_fatal_error(c, "flushing journal keys to btree write buffer: %s",
|
|
|
|
bch2_err_str(ret));
|
2023-11-03 06:57:19 +08:00
|
|
|
bch2_journal_keys_to_write_buffer_end(c, &wb);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
i->type = BCH_JSET_ENTRY_btree_keys;
|
|
|
|
break;
|
2023-11-03 09:43:26 +08:00
|
|
|
}
|
2023-02-19 13:49:51 +08:00
|
|
|
}
|
|
|
|
|
2023-11-03 06:57:19 +08:00
|
|
|
if (wb.wb)
|
|
|
|
bch2_journal_keys_to_write_buffer_end(c, &wb);
|
2024-02-18 06:54:39 +08:00
|
|
|
|
|
|
|
spin_lock(&c->journal.lock);
|
2023-11-03 06:57:19 +08:00
|
|
|
w->need_flush_to_write_buffer = false;
|
2024-02-18 06:54:39 +08:00
|
|
|
spin_unlock(&c->journal.lock);
|
2023-11-03 06:57:19 +08:00
|
|
|
|
2020-05-26 02:57:06 +08:00
|
|
|
start = end = vstruct_last(jset);
|
|
|
|
|
2023-11-03 09:43:26 +08:00
|
|
|
end = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);
|
2020-05-26 02:57:06 +08:00
|
|
|
|
2024-01-27 23:16:15 +08:00
|
|
|
struct jset_entry_datetime *d =
|
|
|
|
container_of(jset_entry_init(&end, sizeof(*d)), struct jset_entry_datetime, entry);
|
|
|
|
d->entry.type = BCH_JSET_ENTRY_datetime;
|
|
|
|
d->seconds = cpu_to_le64(ktime_get_real_seconds());
|
|
|
|
|
2023-11-03 06:57:19 +08:00
|
|
|
bch2_journal_super_entries_add_common(c, &end, seq);
|
2019-01-25 06:54:51 +08:00
|
|
|
u64s = (u64 *) end - (u64 *) start;
|
2024-03-15 01:26:26 +08:00
|
|
|
|
|
|
|
WARN_ON(u64s > j->entry_u64s_reserved);
|
2019-01-25 06:54:51 +08:00
|
|
|
|
2019-02-19 06:39:42 +08:00
|
|
|
le32_add_cpu(&jset->u64s, u64s);
|
2023-05-13 08:28:54 +08:00
|
|
|
|
|
|
|
sectors = vstruct_sectors(jset, c->block_bits);
|
|
|
|
bytes = vstruct_bytes(jset);
|
|
|
|
|
|
|
|
if (sectors > w->sectors) {
|
2024-03-18 09:51:19 +08:00
|
|
|
bch2_fs_fatal_error(c, ": journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
|
2023-05-13 08:28:54 +08:00
|
|
|
vstruct_bytes(jset), w->sectors << 9,
|
|
|
|
u64s, w->u64s_reserved, j->entry_u64s_reserved);
|
2023-11-03 09:01:25 +08:00
|
|
|
return -EINVAL;
|
2023-05-13 08:28:54 +08:00
|
|
|
}
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
jset->magic = cpu_to_le64(jset_magic(c));
|
2023-06-29 08:27:07 +08:00
|
|
|
jset->version = cpu_to_le32(c->sb.version);
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
|
|
|
|
SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
|
|
|
|
|
2021-12-26 07:40:15 +08:00
|
|
|
if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
|
2023-11-03 06:57:19 +08:00
|
|
|
j->last_empty_seq = seq;
|
2020-11-15 05:04:30 +08:00
|
|
|
|
2018-11-02 03:10:01 +08:00
|
|
|
if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
|
|
|
|
validate_before_checksum = true;
|
|
|
|
|
bcachefs: Start using bpos.snapshot field
This patch starts treating the bpos.snapshot field like part of the key
in the btree code:
* bpos_successor() and bpos_predecessor() now include the snapshot field
* Keys in btrees that will be using snapshots (extents, inodes, dirents
and xattrs) now always have their snapshot field set to U32_MAX
The btree iterator code gets a new flag, BTREE_ITER_ALL_SNAPSHOTS, that
determines whether we're iterating over keys in all snapshots or not -
internally, this controlls whether bkey_(successor|predecessor)
increment/decrement the snapshot field, or only the higher bits of the
key.
We add a new member to struct btree_iter, iter->snapshot: when
BTREE_ITER_ALL_SNAPSHOTS is not set, iter->pos.snapshot should always
equal iter->snapshot, which will be 0 for btrees that don't use
snapshots, and alsways U32_MAX for btrees that will use snapshots
(until we enable snapshot creation).
This patch also introduces a new metadata version number, and compat
code for reading from/writing to older versions - this isn't a forced
upgrade (yet).
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2021-03-25 06:02:16 +08:00
|
|
|
if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
|
2018-11-02 03:10:01 +08:00
|
|
|
validate_before_checksum = true;
|
|
|
|
|
|
|
|
if (validate_before_checksum &&
|
2023-11-03 09:01:25 +08:00
|
|
|
(ret = jset_validate(c, NULL, jset, 0, WRITE)))
|
|
|
|
return ret;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2022-02-19 13:42:12 +08:00
|
|
|
ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
|
2017-03-17 14:18:50 +08:00
|
|
|
jset->encrypted_start,
|
|
|
|
vstruct_end(jset) - (void *) jset->encrypted_start);
|
2024-03-18 09:51:19 +08:00
|
|
|
if (bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret)))
|
2023-11-03 09:01:25 +08:00
|
|
|
return ret;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
|
|
|
jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
|
|
|
|
journal_nonce(jset), jset);
|
|
|
|
|
2018-11-02 03:10:01 +08:00
|
|
|
if (!validate_before_checksum &&
|
2023-11-03 09:01:25 +08:00
|
|
|
(ret = jset_validate(c, NULL, jset, 0, WRITE)))
|
|
|
|
return ret;
|
2017-03-17 14:18:50 +08:00
|
|
|
|
2019-02-19 06:39:42 +08:00
|
|
|
memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
|
2023-11-03 09:01:25 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Decide whether journal write @w will be a flush write or a noflush write,
 * updating @w and the journal's flush accounting accordingly.
 *
 * Called with j->lock held (see caller bch2_journal_write()).
 *
 * Returns: 0 on success, or -EIO when the journal is in an error state but a
 * flush write is still required (JOURNAL_need_flush_write set) - in that case
 * we must not write anything at all; see the comment below.
 */
static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	int error = bch2_journal_error(j);

	/*
	 * If the journal is in an error state - we did an emergency shutdown -
	 * we prefer to continue doing journal writes. We just mark them as
	 * noflush so they'll never be used, but they'll still be visible by the
	 * list_journal tool - this helps in debugging.
	 *
	 * There's a caveat: the first journal write after marking the
	 * superblock dirty must always be a flush write, because on startup
	 * from a clean shutdown we didn't necessarily read the journal and the
	 * new journal write might overwrite whatever was in the journal
	 * previously - we can't leave the journal without any flush writes in
	 * it.
	 *
	 * So if we're in an error state, and we're still starting up, we don't
	 * write anything at all.
	 */
	if (error && test_bit(JOURNAL_need_flush_write, &j->flags))
		return -EIO;

	/*
	 * Go noflush when errored, when already marked noflush, or when a
	 * flush isn't required and we're still inside the configured
	 * journal_flush_delay window since the last flush write.
	 */
	if (error ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_may_skip_flush, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(w->data, true);
		/* noflush writes don't advance last_seq: */
		w->data->last_seq = 0;
		w->last_seq = 0;

		j->nr_noflush_writes++;
	} else {
		/* flush write: restart the flush-delay window */
		w->must_flush = true;
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
		clear_bit(JOURNAL_need_flush_write, &j->flags);
	}

	return 0;
}
|
|
|
|
|
2023-11-18 08:13:27 +08:00
|
|
|
/*
 * Closure callback: main entry point for writing out a journal buffer.
 *
 * Picks flush vs. noflush (bch2_journal_write_pick_flush()), prepares the
 * on-disk jset (bch2_journal_write_prep()), allocates devices/sectors for the
 * write (journal_write_alloc(), retrying after discards), marks replicas
 * before submission so recovery will find the entries after a crash, then
 * continues the closure at the preflush or submit stage.  On error we take
 * the filesystem into an emergency state via bch2_fatal_error() and still
 * continue at journal_write_done() so the buf is cleaned up.
 */
CLOSURE_CALLBACK(bch2_journal_write)
{
	closure_type(w, struct journal_buf, io);
	struct journal *j = container_of(w, struct journal, buf[w->idx]);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_replicas_padded replicas;
	unsigned nr_rw_members = 0;
	int ret;

	for_each_rw_member(c, ca)
		nr_rw_members++;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
	BUG_ON(!w->write_started);
	BUG_ON(w->write_allocated);
	BUG_ON(w->write_done);

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	/* with more than one device, flushes are issued as a separate stage: */
	if (nr_rw_members > 1)
		w->separate_flush = true;

	ret = bch2_journal_write_pick_flush(j, w);
	spin_unlock(&j->lock);
	if (ret)
		goto err;

	mutex_lock(&j->buf_lock);
	journal_buf_realloc(j, w);

	ret = bch2_journal_write_prep(j, w);
	mutex_unlock(&j->buf_lock);
	if (ret)
		goto err;

	j->entry_bytes_written += vstruct_bytes(w->data);

	/*
	 * Allocate space for the write; if allocation fails and we can
	 * discard, discard and retry.  NOTE: on both loop exits (success or
	 * can't-discard failure) we still hold j->lock.
	 */
	while (1) {
		spin_lock(&j->lock);
		ret = journal_write_alloc(j, w);
		if (!ret || !j->can_discard)
			break;

		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
	}

	if (ret) {
		/* still holding j->lock here, hence the atomic printbuf */
		struct printbuf buf = PRINTBUF;
		buf.atomic++;

		prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write: %s"),
			   bch2_err_str(ret));
		__bch2_journal_debug_to_text(&buf, j);
		spin_unlock(&j->lock);
		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		goto err;
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;
	w->write_allocated = true;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	bch2_journal_do_writes(j);
	spin_unlock(&j->lock);

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	/*
	 * Mark journal replicas before we submit the write to guarantee
	 * recovery will find the journal entries after a crash.
	 */
	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
				 w->devs_written);
	ret = bch2_mark_replicas(c, &replicas.e);
	if (ret)
		goto err;

	/* flush writes issue a preflush stage first; noflush skip straight to submit */
	if (!JSET_NO_FLUSH(w->data))
		continue_at(cl, journal_write_preflush, j->wq);
	else
		continue_at(cl, journal_write_submit, j->wq);
	return;
no_io:
	continue_at(cl, journal_write_done, j->wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, j->wq);
}
|