bcachefs: Move snapshot table size to struct snapshot_table
We need to add bounds checking for snapshot table accesses - it turns out there are cases where we do need to use the snapshots table before fsck checks have completed (and indeed, fsck may not have been run).

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 63332394c7
parent aa6e130e3c
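The point of the change is that the element count now travels inside the same RCU-protected allocation as the array itself, so any code holding a snapshot_table pointer can bounds-check an index without consulting a separately stored size that may describe a different table. Below is a minimal, self-contained userspace sketch of that pattern; it is not bcachefs code, and the names (struct table, table_lookup) are made up for illustration.

/*
 * Standalone sketch (not bcachefs code): the element count lives inside
 * the flex-array struct itself, so a bounds check needs only the table
 * pointer.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct entry {
        uint32_t parent;
};

struct table {
        size_t nr;              /* number of valid slots in s[] */
        struct entry s[];       /* flexible array member */
};

/* Bounds-checked lookup: returns NULL instead of indexing past the end. */
static struct entry *table_lookup(struct table *t, uint32_t id)
{
        uint32_t idx = UINT32_MAX - id;         /* ids count down, as in the diff */

        return (t && idx < t->nr) ? &t->s[idx] : NULL;
}

int main(void)
{
        size_t nr = 16;
        struct table *t = calloc(1, sizeof(*t) + nr * sizeof(t->s[0]));

        if (!t)
                return 1;
        t->nr = nr;

        /* In range: UINT32_MAX - 3 maps to idx 3. */
        printf("hit:  %p\n", (void *)table_lookup(t, UINT32_MAX - 3));
        /* Out of range: idx 100 >= nr, so the lookup refuses. */
        printf("miss: %p\n", (void *)table_lookup(t, UINT32_MAX - 100));

        free(t);
        return 0;
}

Keeping nr next to the flexible array is what lets __snapshot_t() in the diff below return NULL for an out-of-range id instead of reading past the end of the allocation.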
@@ -810,7 +810,6 @@ struct bch_fs {
 	/* snapshot.c: */
 	struct snapshot_table __rcu	*snapshots;
-	size_t				snapshot_table_size;
 	struct mutex			snapshot_table_lock;
 	struct rw_semaphore		snapshot_create_lock;
@@ -151,36 +151,39 @@ out:
 static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
 {
 	size_t idx = U32_MAX - id;
-	size_t new_size;
 	struct snapshot_table *new, *old;
 
-	new_size = max(16UL, roundup_pow_of_two(idx + 1));
+	size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
+	size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
 
-	new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
+	new = kvzalloc(new_bytes, GFP_KERNEL);
 	if (!new)
 		return NULL;
 
+	new->nr = new_size;
+
 	old = rcu_dereference_protected(c->snapshots, true);
 	if (old)
-		memcpy(new->s,
-		       rcu_dereference_protected(c->snapshots, true)->s,
-		       sizeof(new->s[0]) * c->snapshot_table_size);
+		memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);
 
 	rcu_assign_pointer(c->snapshots, new);
-	c->snapshot_table_size = new_size;
-	kvfree_rcu_mightsleep(old);
+	kvfree_rcu(old, rcu);
 
-	return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+	return &rcu_dereference_protected(c->snapshots,
+				lockdep_is_held(&c->snapshot_table_lock))->s[idx];
 }
 
 static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
 {
 	size_t idx = U32_MAX - id;
+	struct snapshot_table *table =
+		rcu_dereference_protected(c->snapshots,
+				lockdep_is_held(&c->snapshot_table_lock));
 
 	lockdep_assert_held(&c->snapshot_table_lock);
 
-	if (likely(idx < c->snapshot_table_size))
-		return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+	if (likely(table && idx < table->nr))
+		return &table->s[idx];
 
 	return __snapshot_t_mut(c, id);
 }
@@ -33,7 +33,11 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
 
 static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
 {
-	return &t->s[U32_MAX - id];
+	u32 idx = U32_MAX - id;
+
+	return likely(t && idx < t->nr)
+		? &t->s[idx]
+		: NULL;
 }
 
 static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
@@ -20,6 +20,8 @@ struct snapshot_t {
 };
 
 struct snapshot_table {
+	struct rcu_head		rcu;
+	size_t			nr;
 #ifndef RUST_BINDGEN
 	DECLARE_FLEX_ARRAY(struct snapshot_t, s);
 #else
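For the write side, the shape of __snapshot_t_mut() above is a grow-and-republish pattern: allocate a larger table, record its capacity in ->nr, copy the old entries, publish the new pointer, and defer freeing the old table until readers are done with it. Below is a rough userspace sketch of that flow, reusing struct table (and the headers) from the earlier sketch plus <string.h>; plain assignment and free() stand in for rcu_assign_pointer()/kvfree_rcu(), which is only safe here because the sketch is single-threaded, and table_grow() and its parameters are illustrative, not bcachefs API.

/* Sketch only: grow a table and republish it, size carried in ->nr. */
static struct table *table_grow(struct table **slot, size_t min_nr)
{
        struct table *old = *slot;
        /*
         * The kernel code sizes the allocation with
         * kmalloc_size_roundup(struct_size(new, s, idx + 1)) and derives
         * ->nr back from the rounded-up byte count; here the requested
         * count is used directly.
         */
        struct table *new = calloc(1, sizeof(*new) + min_nr * sizeof(new->s[0]));

        if (!new)
                return NULL;
        new->nr = min_nr;

        /* Copy however many entries the old table actually held. */
        if (old)
                memcpy(new->s, old->s, old->nr * sizeof(old->s[0]));

        *slot = new;    /* kernel: rcu_assign_pointer(c->snapshots, new) */
        free(old);      /* kernel: kvfree_rcu(old, rcu) */
        return new;
}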