bcachefs: serialize persistent_reserved
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 3577df5f7f
parent 3e0745e283
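The commit carries no body, so as a reader's summary of the diff below: each persistent_reserved[i] counter is now serialized as its own BCH_JSET_ENTRY_usage journal/superblock entry, reusing entry.btree_id for the usage type (FS_USAGE_RESERVED), entry.level for the replication level, and a new __le64 v field for the value; per-replicas-entry data usage moves to a separate BCH_JSET_ENTRY_data_usage entry type. A minimal standalone sketch of that encoding follows; the struct definitions are simplified stand-ins, and the 8-byte jset_entry header layout, DIV_ROUND_UP and the example values are assumptions rather than code copied from the tree:

    /* Standalone illustration only -- simplified stand-ins, not the kernel structs. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    #define BCH_JSET_ENTRY_usage    5       /* from x(usage, 5) in the format header */
    #define FS_USAGE_RESERVED       0       /* from the renamed enum below */

    struct jset_entry {                     /* assumed 8-byte on-disk entry header */
            uint16_t        u64s;           /* payload size in u64s, header not counted */
            uint8_t         btree_id;       /* reused by this commit as the usage type */
            uint8_t         level;          /* reused as the replication level */
            uint8_t         type;
            uint8_t         pad[3];
    };

    struct jset_entry_usage {               /* new fixed-size layout from the diff */
            struct jset_entry       entry;
            uint64_t                v;      /* __le64 in the real struct */
    } __attribute__((packed));

    int main(void)
    {
            /* Encode "1024 sectors reserved at replication level 2". */
            struct jset_entry_usage u;

            memset(&u, 0, sizeof(u));
            /* "- 1": entry.u64s counts payload u64s only, not the header u64 */
            u.entry.u64s     = DIV_ROUND_UP(sizeof(u), sizeof(uint64_t)) - 1;
            u.entry.type     = BCH_JSET_ENTRY_usage;
            u.entry.btree_id = FS_USAGE_RESERVED;
            u.entry.level    = 2;
            u.v              = 1024;        /* cpu_to_le64() in kernel context */

            printf("%zu bytes, u64s=%u type=%u btree_id=%u level=%u v=%llu\n",
                   sizeof(u), (unsigned)u.entry.u64s, (unsigned)u.entry.type,
                   (unsigned)u.entry.btree_id, (unsigned)u.entry.level,
                   (unsigned long long)u.v);
            return 0;
    }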
@@ -1363,7 +1363,8 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
 	x(prio_ptrs,		2)		\
 	x(blacklist,		3)		\
 	x(blacklist_v2,		4)		\
-	x(usage,		5)
+	x(usage,		5)		\
+	x(data_usage,		6)
 
 enum {
 #define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
@@ -1394,7 +1395,7 @@ struct jset_entry_blacklist_v2 {
 };
 
 enum {
-	FS_USAGE_REPLICAS	= 0,
+	FS_USAGE_RESERVED	= 0,
 	FS_USAGE_INODES		= 1,
 	FS_USAGE_KEY_VERSION	= 2,
 	FS_USAGE_NR		= 3
@@ -1402,8 +1403,12 @@ enum {
 
 struct jset_entry_usage {
 	struct jset_entry	entry;
-	__le64			sectors;
-	__u8			type;
+	__le64			v;
+} __attribute__((packed));
+
+struct jset_entry_data_usage {
+	struct jset_entry	entry;
+	__le64			v;
 	struct bch_replicas_entry r;
 } __attribute__((packed));
 
@@ -123,6 +123,9 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
 	nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
 	usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
 
+	for (i = 0; i < BCH_REPLICAS_MAX; i++)
+		usage->s.reserved += usage->persistent_reserved[i];
+
 	for (i = 0; i < c->replicas.nr; i++) {
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);
@@ -309,6 +309,27 @@ static int journal_entry_validate_usage(struct bch_fs *c,
 	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
 	int ret = 0;
 
 	if (journal_entry_err_on(bytes < sizeof(*u),
 				 c,
 				 "invalid journal entry usage: bad size")) {
 		journal_entry_null_range(entry, vstruct_next(entry));
 		return ret;
 	}
 
 fsck_err:
 	return ret;
 }
 
+static int journal_entry_validate_data_usage(struct bch_fs *c,
+					struct jset *jset,
+					struct jset_entry *entry,
+					int write)
+{
+	struct jset_entry_data_usage *u =
+		container_of(entry, struct jset_entry_data_usage, entry);
+	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+	int ret = 0;
+
+	if (journal_entry_err_on(bytes < sizeof(*u) ||
+				 bytes < sizeof(*u) + u->r.nr_devs,
+				 c,
@@ -75,23 +75,32 @@ static int journal_replay_entry_early(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		switch (u->type) {
-		case FS_USAGE_REPLICAS:
-			ret = bch2_replicas_set_usage(c, &u->r,
-						      le64_to_cpu(u->sectors));
+		switch (entry->btree_id) {
+		case FS_USAGE_RESERVED:
+			if (entry->level < BCH_REPLICAS_MAX)
+				percpu_u64_set(&c->usage[0]->
+					       persistent_reserved[entry->level],
+					       le64_to_cpu(u->v));
 			break;
 		case FS_USAGE_INODES:
 			percpu_u64_set(&c->usage[0]->s.nr_inodes,
-				       le64_to_cpu(u->sectors));
+				       le64_to_cpu(u->v));
 			break;
 		case FS_USAGE_KEY_VERSION:
 			atomic64_set(&c->key_version,
-				     le64_to_cpu(u->sectors));
+				     le64_to_cpu(u->v));
 			break;
 		}
 
 		break;
 	}
+	case BCH_JSET_ENTRY_data_usage: {
+		struct jset_entry_data_usage *u =
+			container_of(entry, struct jset_entry_data_usage, entry);
+		ret = bch2_replicas_set_usage(c, &u->r,
+					      le64_to_cpu(u->v));
+		break;
+	}
 	}
 
 	return ret;
@@ -156,7 +165,8 @@ static bool journal_empty(struct list_head *journal)
 	list_for_each_entry(i, journal, list) {
 		vstruct_for_each(&i->j, entry) {
 			if (entry->type == BCH_JSET_ENTRY_btree_root ||
-			    entry->type == BCH_JSET_ENTRY_usage)
+			    entry->type == BCH_JSET_ENTRY_usage ||
+			    entry->type == BCH_JSET_ENTRY_data_usage)
 				continue;
 
 			if (entry->type == BCH_JSET_ENTRY_btree_keys &&
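To close the loop on the replay change above, here is a standalone sketch of the same decode switch (entry type, then btree_id) applied to a toy in-memory struct instead of the percpu counters. The struct layouts and the BCH_REPLICAS_MAX value are assumptions for illustration, not kernel code:

    /* Standalone illustration of the replay switch; simplified stand-ins only. */
    #include <stdint.h>
    #include <stdio.h>

    #define FS_USAGE_RESERVED       0
    #define FS_USAGE_INODES         1
    #define FS_USAGE_KEY_VERSION    2
    #define BCH_REPLICAS_MAX        4       /* assumed value */

    struct jset_entry {                     /* same assumed 8-byte header as before */
            uint16_t u64s;
            uint8_t  btree_id;              /* usage type */
            uint8_t  level;                 /* replication level for FS_USAGE_RESERVED */
            uint8_t  type;
            uint8_t  pad[3];
    };

    struct jset_entry_usage {
            struct jset_entry entry;
            uint64_t          v;
    } __attribute__((packed));

    struct toy_fs_usage {                   /* stand-in for c->usage[0] */
            uint64_t persistent_reserved[BCH_REPLICAS_MAX];
            uint64_t nr_inodes;
            uint64_t key_version;
    };

    /* Mirrors the new BCH_JSET_ENTRY_usage branch of journal_replay_entry_early(). */
    static void replay_usage_entry(struct toy_fs_usage *fs,
                                   const struct jset_entry_usage *u)
    {
            switch (u->entry.btree_id) {
            case FS_USAGE_RESERVED:
                    if (u->entry.level < BCH_REPLICAS_MAX)
                            fs->persistent_reserved[u->entry.level] = u->v;
                    break;
            case FS_USAGE_INODES:
                    fs->nr_inodes = u->v;
                    break;
            case FS_USAGE_KEY_VERSION:
                    fs->key_version = u->v;
                    break;
            }
    }

    int main(void)
    {
            struct toy_fs_usage fs = { { 0 } };
            struct jset_entry_usage u = {
                    .entry = { .btree_id = FS_USAGE_RESERVED, .level = 2 },
                    .v = 1024,
            };

            replay_usage_entry(&fs, &u);
            printf("persistent_reserved[2] = %llu\n",
                   (unsigned long long)fs.persistent_reserved[2]);
            return 0;
    }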
@@ -312,9 +312,14 @@ static unsigned reserve_journal_replicas(struct bch_fs *c,
 	journal_res_u64s +=
 		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
 
+	/* persistent_reserved: */
+	journal_res_u64s +=
+		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
+		BCH_REPLICAS_MAX;
+
 	for_each_cpu_replicas_entry(r, e)
 		journal_res_u64s +=
-			DIV_ROUND_UP(sizeof(struct jset_entry_usage) +
+			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
 				     e->nr_devs, sizeof(u64));
 	return journal_res_u64s;
 }
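A rough worked example of the reservation bump above: one fixed-size usage entry per possible replication level for persistent_reserved, plus one variable-size data_usage entry per replicas entry. The entry sizes, BCH_REPLICAS_MAX and the counts are assumed example values, not taken from the tree:

    /* Worked example only; sizes and counts are assumptions, not kernel values. */
    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned usage_bytes         = 16; /* assumed sizeof(struct jset_entry_usage) */
            unsigned data_usage_bytes    = 16; /* assumed size before the nr_devs tail */
            unsigned replicas_max        = 4;  /* assumed BCH_REPLICAS_MAX */
            unsigned nr_replicas_entries = 8;  /* example filesystem */
            unsigned nr_devs             = 2;  /* example devices per replicas entry */
            unsigned u64s                = 0;

            /* persistent_reserved: one fixed-size usage entry per replication level */
            u64s += DIV_ROUND_UP(usage_bytes, sizeof(uint64_t)) * replicas_max;

            /* one variable-size data_usage entry per replicas entry */
            u64s += nr_replicas_entries *
                    DIV_ROUND_UP(data_usage_bytes + nr_devs, sizeof(uint64_t));

            printf("extra journal reservation: %u u64s (%u bytes)\n",
                   u64s, u64s * (unsigned)sizeof(uint64_t));
            return 0;
    }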
@@ -900,7 +900,6 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 					struct jset_entry *entry,
 					u64 journal_seq)
 {
-	struct jset_entry_usage *u;
 	struct btree_root *r;
 	unsigned i;
 
@@ -929,24 +928,45 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 
 	{
 		u64 nr_inodes = percpu_u64_get(&c->usage[0]->s.nr_inodes);
+		struct jset_entry_usage *u =
+			container_of(entry, struct jset_entry_usage, entry);
 
-		u = container_of(entry, struct jset_entry_usage, entry);
 		memset(u, 0, sizeof(*u));
 		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
 		u->entry.type	= BCH_JSET_ENTRY_usage;
-		u->sectors	= cpu_to_le64(nr_inodes);
-		u->type		= FS_USAGE_INODES;
+		u->entry.btree_id = FS_USAGE_INODES;
+		u->v		= cpu_to_le64(nr_inodes);
 
 		entry = vstruct_next(entry);
 	}
 
 	{
-		u = container_of(entry, struct jset_entry_usage, entry);
+		struct jset_entry_usage *u =
+			container_of(entry, struct jset_entry_usage, entry);
+
 		memset(u, 0, sizeof(*u));
 		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
 		u->entry.type	= BCH_JSET_ENTRY_usage;
-		u->sectors	= cpu_to_le64(atomic64_read(&c->key_version));
-		u->type		= FS_USAGE_KEY_VERSION;
+		u->entry.btree_id = FS_USAGE_KEY_VERSION;
+		u->v		= cpu_to_le64(atomic64_read(&c->key_version));
 
 		entry = vstruct_next(entry);
 	}
 
+	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+		struct jset_entry_usage *u =
+			container_of(entry, struct jset_entry_usage, entry);
+		u64 sectors = percpu_u64_get(&c->usage[0]->persistent_reserved[i]);
+
+		if (!sectors)
+			continue;
+
+		memset(u, 0, sizeof(*u));
+		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		u->entry.type	= BCH_JSET_ENTRY_usage;
+		u->entry.btree_id = FS_USAGE_RESERVED;
+		u->entry.level	= i;
+		u->v		= sectors;
+
+		entry = vstruct_next(entry);
+	}
@@ -955,13 +975,14 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);
 		u64 sectors = percpu_u64_get(&c->usage[0]->data[i]);
+		struct jset_entry_data_usage *u =
+			container_of(entry, struct jset_entry_data_usage, entry);
+
-		u = container_of(entry, struct jset_entry_usage, entry);
 		memset(u, 0, sizeof(*u));
 		u->entry.u64s	= DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
 					       sizeof(u64)) - 1;
-		u->entry.type	= BCH_JSET_ENTRY_usage;
-		u->sectors	= cpu_to_le64(sectors);
-		u->type		= FS_USAGE_REPLICAS;
+		u->entry.type	= BCH_JSET_ENTRY_data_usage;
+		u->v		= cpu_to_le64(sectors);
 		unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
 			      "embedded variable length struct");
 