Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-12 05:24:12 +08:00
bcachefs: fs_usage_u64s()
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 768ac63924
commit ecf37a4a80
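This commit replaces the open-coded size computation for struct bch_fs_usage (a fixed header followed by one u64 counter per replicas-table entry) with a single helper, fs_usage_u64s(), and moves allocation of c->usage[0] and c->usage_scratch out of bch2_fs_alloc() and into replicas_table_update(). A minimal userspace sketch of the underlying pattern follows; it is not the kernel code, and the struct layout and names are simplified stand-ins:

/*
 * Sketch: a struct that is a fixed header plus a flexible array of
 * u64 counters, with one helper computing its total size in units of
 * u64.  Centralizing that computation keeps allocation, zeroing and
 * summing in sync when the counter table grows.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint64_t u64;

struct fs_usage {
	u64 reserved;
	u64 data[];		/* one counter per replicas entry */
};

/* Total size of struct fs_usage in units of u64, like fs_usage_u64s() */
static unsigned fs_usage_u64s(unsigned replicas_nr)
{
	return sizeof(struct fs_usage) / sizeof(u64) + replicas_nr;
}

int main(void)
{
	unsigned nr = 4;
	unsigned u64s = fs_usage_u64s(nr);

	/* Allocate and zero using the same size computation everywhere */
	struct fs_usage *u = calloc(u64s, sizeof(u64));
	if (!u)
		return 1;

	u->data[2] = 42;

	/* Accumulation treats the whole struct as a flat array of u64s */
	u64 *flat = (u64 *) u;
	u64 total = 0;
	for (unsigned i = 0; i < u64s; i++)
		total += flat[i];

	printf("u64s=%u total=%llu\n", u64s, (unsigned long long) total);
	free(u);
	return 0;
}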
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -605,8 +605,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 	}
 
 	{
-		unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64) +
-			c->replicas.nr;
+		unsigned nr = fs_usage_u64s(c);
 		struct bch_fs_usage *dst = (void *)
 			bch2_acc_percpu_u64s((void *) c->usage[0], nr);
 		struct bch_fs_usage *src = (void *)
@@ -657,10 +656,8 @@ static int bch2_gc_start(struct bch_fs *c)
 
 	BUG_ON(c->usage[1]);
 
-	c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
-					 sizeof(u64) * c->replicas.nr,
-					 sizeof(u64),
-					 GFP_KERNEL);
+	c->usage[1] = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
+					 sizeof(u64), GFP_KERNEL);
 	percpu_up_write(&c->mark_lock);
 
 	if (!c->usage[1])
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -117,11 +117,11 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c)
 void bch2_fs_usage_initialize(struct bch_fs *c)
 {
 	struct bch_fs_usage *usage;
-	unsigned i, nr;
+	unsigned i;
 
 	percpu_down_write(&c->mark_lock);
-	nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
-	usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
+	usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0],
+					      fs_usage_u64s(c));
 
 	for (i = 0; i < BCH_REPLICAS_MAX; i++)
 		usage->reserved += usage->persistent_reserved[i];
@@ -159,24 +159,23 @@ struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
 struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
 {
 	struct bch_fs_usage *ret;
-	unsigned nr = READ_ONCE(c->replicas.nr);
+	unsigned v, u64s = fs_usage_u64s(c);
 retry:
-	ret = kzalloc(sizeof(*ret) + nr * sizeof(u64), GFP_NOFS);
+	ret = kzalloc(u64s * sizeof(u64), GFP_NOFS);
 	if (unlikely(!ret))
 		return NULL;
 
 	percpu_down_read(&c->mark_lock);
 
-	if (unlikely(nr < c->replicas.nr)) {
-		nr = c->replicas.nr;
+	v = fs_usage_u64s(c);
+	if (unlikely(u64s != v)) {
+		u64s = v;
 		percpu_up_read(&c->mark_lock);
 		kfree(ret);
 		goto retry;
 	}
 
-	acc_u64s_percpu((u64 *) ret,
-			(u64 __percpu *) c->usage[0],
-			sizeof(*ret) / sizeof(u64) + nr);
+	acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
 
 	return ret;
 }
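The bch2_fs_usage_read() hunk above also shows why fs_usage_u64s() samples c->replicas.nr with READ_ONCE(): the size is read before allocating, outside the lock, then re-checked under c->mark_lock, retrying with the larger size if the replicas table grew in the meantime. A rough userspace sketch of that optimistic allocate-then-recheck pattern, with a plain mutex standing in for the percpu rwsem and hypothetical names throughout:

#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t mark_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned cur_u64s;	/* grows when the replicas table grows */

uint64_t *usage_read(void)
{
	/* Sample the size without the lock; it may be stale by the
	 * time we allocate. */
	unsigned u64s = __atomic_load_n(&cur_u64s, __ATOMIC_RELAXED);
	uint64_t *ret;
retry:
	ret = calloc(u64s, sizeof(*ret));
	if (!ret)
		return NULL;

	pthread_mutex_lock(&mark_lock);

	/* Re-check under the lock: if the table was resized, our
	 * buffer is the wrong size, so free it and try again. */
	if (u64s != cur_u64s) {
		u64s = cur_u64s;
		pthread_mutex_unlock(&mark_lock);
		free(ret);
		goto retry;
	}

	/* ... sum the per-cpu counters into ret under the lock ... */

	pthread_mutex_unlock(&mark_lock);
	return ret;
}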
@@ -294,8 +293,7 @@ int bch2_fs_usage_apply(struct bch_fs *c,
 
 	preempt_disable();
 	acc_u64s((u64 *) this_cpu_ptr(c->usage[0]),
-		 (u64 *) fs_usage,
-		 sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
+		 (u64 *) fs_usage, fs_usage_u64s(c));
 	preempt_enable();
 
 	return ret;
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -212,14 +212,18 @@ static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
 
 /* Filesystem usage: */
 
+static inline unsigned fs_usage_u64s(struct bch_fs *c)
+{
+
+	return sizeof(struct bch_fs_usage) / sizeof(u64) +
+		READ_ONCE(c->replicas.nr);
+}
+
 static inline struct bch_fs_usage *bch2_fs_usage_get_scratch(struct bch_fs *c)
 {
-	struct bch_fs_usage *ret;
-
-	ret = this_cpu_ptr(c->usage_scratch);
-
-	memset(ret, 0, sizeof(*ret) + c->replicas.nr * sizeof(u64));
+	struct bch_fs_usage *ret = this_cpu_ptr(c->usage_scratch);
 
+	memset(ret, 0, fs_usage_u64s(c) * sizeof(u64));
 	return ret;
 }
 
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -262,39 +262,37 @@ static void __replicas_table_update(struct bch_fs_usage __percpu *dst_p,
 static int replicas_table_update(struct bch_fs *c,
 				 struct bch_replicas_cpu *new_r)
 {
-	struct bch_fs_usage __percpu *new_usage[3] = { NULL, NULL, NULL };
+	struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
+	struct bch_fs_usage __percpu *new_scratch = NULL;
 	unsigned bytes = sizeof(struct bch_fs_usage) +
 		sizeof(u64) * new_r->nr;
-	unsigned i;
 	int ret = -ENOMEM;
 
-	for (i = 0; i < 3; i++) {
-		if (i < 2 && !c->usage[i])
-			continue;
+	if (!(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
+						GFP_NOIO)) ||
+	    (c->usage[1] &&
+	     !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
+						 GFP_NOIO))) ||
+	    !(new_scratch = __alloc_percpu_gfp(bytes, sizeof(u64),
+					       GFP_NOIO)))
+		goto err;
 
-		new_usage[i] = __alloc_percpu_gfp(bytes, sizeof(u64),
-						  GFP_NOIO);
-		if (!new_usage[i])
-			goto err;
-	}
+	if (c->usage[0])
+		__replicas_table_update(new_usage[0], new_r,
+					c->usage[0], &c->replicas);
+	if (c->usage[1])
+		__replicas_table_update(new_usage[1], new_r,
+					c->usage[1], &c->replicas);
 
-	for (i = 0; i < 2; i++) {
-		if (!c->usage[i])
-			continue;
-
-		__replicas_table_update(new_usage[i], new_r,
-					c->usage[i], &c->replicas);
-
-		swap(c->usage[i], new_usage[i]);
-	}
-
-	swap(c->usage_scratch, new_usage[2]);
-
-	swap(c->replicas, *new_r);
+	swap(c->usage[0], new_usage[0]);
+	swap(c->usage[1], new_usage[1]);
+	swap(c->usage_scratch, new_scratch);
+	swap(c->replicas, *new_r);
 	ret = 0;
 err:
-	for (i = 0; i < 3; i++)
-		free_percpu(new_usage[i]);
+	free_percpu(new_scratch);
+	free_percpu(new_usage[1]);
+	free_percpu(new_usage[0]);
 	return ret;
 }
 
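After this change replicas_table_update() follows an allocate/copy/swap shape: new percpu tables are allocated up front, existing contents are copied across (typically under the caller's write lock on c->mark_lock), and the pointers are swapped so that a single error path frees whichever set is no longer needed. A simplified single-threaded sketch of that shape, using plain heap allocation and made-up names rather than the kernel's percpu API:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct table {
	unsigned nr;		/* number of counters */
	uint64_t *counters;
};

/* Resize the table to new_nr counters, preserving the overlapping
 * prefix; extra entries start out zeroed. */
int table_update(struct table *t, unsigned new_nr)
{
	uint64_t *new_counters = calloc(new_nr, sizeof(*new_counters));
	int ret = -1;

	if (!new_counters)
		goto err;

	if (t->counters)
		memcpy(new_counters, t->counters,
		       (new_nr < t->nr ? new_nr : t->nr) *
		       sizeof(*new_counters));

	/* Swap: t takes ownership of the new table, and new_counters
	 * now points at the old one so the error path can free it. */
	uint64_t *old = t->counters;
	t->counters = new_counters;
	t->nr = new_nr;
	new_counters = old;

	ret = 0;
err:
	free(new_counters);	/* old table on success, NULL on failure */
	return ret;
}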
@@ -975,5 +973,6 @@ int bch2_fs_replicas_init(struct bch_fs *c)
 {
 	c->journal.entry_u64s_reserved +=
 		reserve_journal_replicas(c, &c->replicas);
-	return 0;
+
+	return replicas_table_update(c, &c->replicas);
 }
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -535,7 +535,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 {
 	struct bch_sb_field_members *mi;
 	struct bch_fs *c;
-	unsigned i, iter_size, fs_usage_size;
+	unsigned i, iter_size;
 	const char *err;
 
 	pr_verbose_init(opts, "");
@@ -629,9 +629,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 			(btree_blocks(c) + 1) * 2 *
 			sizeof(struct btree_node_iter_set);
 
-	fs_usage_size = sizeof(struct bch_fs_usage) +
-		sizeof(u64) * c->replicas.nr;
-
 	if (!(c->wq = alloc_workqueue("bcachefs",
 				WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
 	    !(c->copygc_wq = alloc_workqueue("bcache_copygc",
@@ -648,8 +645,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 			max(offsetof(struct btree_read_bio, bio),
 			    offsetof(struct btree_write_bio, wbio.bio)),
 			BIOSET_NEED_BVECS) ||
-	    !(c->usage[0] = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
-	    !(c->usage_scratch = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
 	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
 	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
 					btree_bytes(c)) ||