// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"
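
/*
 * SHOW()/STORE() generate the sysfs ->show()/->store() glue for a kobject
 * type: the outer wrappers handle printbuf setup, copying into the PAGE_SIZE
 * sysfs buffer and error class conversion, while the actual
 * fn##_to_text()/fn##_store_inner() bodies are written below in normal
 * function syntax.  SYSFS_OPS() then ties a show/store pair into a
 * struct sysfs_ops.
 */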
#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
				  const char *, size_t);		\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)
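
/*
 * Attribute declaration helpers: read_attribute()/write_attribute()/
 * rw_attribute() each declare a 'struct attribute sysfs_<name>' with the
 * given permissions.  The sysfs_print*()/sysfs_strtoul*() helpers below are
 * meant to be expanded inside SHOW()/STORE() bodies, where 'attr', 'out',
 * 'buf' and 'size' are in scope.
 */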
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
	{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)

#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})
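
/*
 * The attributes declared below back individual sysfs files; paths are
 * illustrative (assuming a filesystem with UUID <uuid>), e.g.:
 *
 *	cat /sys/fs/bcachefs/<uuid>/btree_cache_size
 *	echo 1 > /sys/fs/bcachefs/<uuid>/internal/trigger_gc
 */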
write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(flags);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
read_attribute(io_done);
read_attribute(io_errors);
write_attribute(io_errors_reset);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(write_points);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)	#n,
	BCH_WRITE_REFS()
#undef x
	NULL
};

static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
		prt_str(out, bch2_write_refs[i]);
		prt_tab(out);
		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
		prt_newline(out);
	}
}
#endif

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);

rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)						\
	static struct attribute sysfs_time_stat_##_name =	\
		{ .name = #_name, .mode = 0444 };
BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = 0444,
};

static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_buf_bytes(b);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}
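
/*
 * Walk every extent-carrying btree and tally, per compression type, extent
 * counts and compressed/uncompressed sectors, backing the
 * 'compression_stats' file:
 */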
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	enum btree_id id;
	struct compression_type_stats {
		u64 nr_extents;
		u64 sectors_compressed;
		u64 sectors_uncompressed;
	} s[BCH_COMPRESSION_TYPE_NR];
	u64 compressed_incompressible = 0;
	int ret = 0;

	memset(s, 0, sizeof(s));

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EPERM;

	trans = bch2_trans_get(c);

	for (id = 0; id < BTREE_ID_NR; id++) {
		if (!btree_type_has_ptrs(id))
			continue;

		ret = for_each_btree_key(trans, iter, id, POS_MIN,
					 BTREE_ITER_ALL_SNAPSHOTS, k, ({
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			struct bch_extent_crc_unpacked crc;
			const union bch_extent_entry *entry;
			bool compressed = false, incompressible = false;

			bkey_for_each_crc(k.k, ptrs, crc, entry) {
				incompressible |= crc.compression_type == BCH_COMPRESSION_TYPE_incompressible;
				compressed |= crc_is_compressed(crc);

				if (crc_is_compressed(crc)) {
					s[crc.compression_type].nr_extents++;
					s[crc.compression_type].sectors_compressed += crc.compressed_size;
					s[crc.compression_type].sectors_uncompressed += crc.uncompressed_size;
				}
			}

			compressed_incompressible += compressed && incompressible;

			if (!compressed) {
				unsigned t = incompressible ? BCH_COMPRESSION_TYPE_incompressible : 0;

				s[t].nr_extents++;
				s[t].sectors_compressed += k.k->size;
				s[t].sectors_uncompressed += k.k->size;
			}
			0;
		}));
	}

	bch2_trans_put(trans);

	if (ret)
		return ret;

	prt_str(out, "type");
	printbuf_tabstop_push(out, 12);
	prt_tab(out);

	prt_str(out, "compressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "uncompressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "average extent size");
	printbuf_tabstop_push(out, 24);
	prt_tab_rjust(out);
	prt_newline(out);

	for (unsigned i = 0; i < ARRAY_SIZE(s); i++) {
		bch2_prt_compression_type(out, i);
		prt_tab(out);

		prt_human_readable_u64(out, s[i].sectors_compressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, s[i].sectors_uncompressed << 9);
		prt_tab_rjust(out);

		prt_human_readable_u64(out, s[i].nr_extents
				       ? div_u64(s[i].sectors_uncompressed << 9, s[i].nr_extents)
				       : 0);
		prt_tab_rjust(out);
		prt_newline(out);
	}

	if (compressed_incompressible) {
		prt_printf(out, "%llu compressed & incompressible extents", compressed_incompressible);
		prt_newline(out);
	}

	return 0;
}
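
/* Current position of the bucket generation (gc_gens) walk, for 'gc_gens_pos': */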
static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}

static void bch2_btree_wakeup_all(struct bch_fs *c)
{
	struct btree_trans *trans;

	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

		if (b)
			six_lock_wakeup_all(&b->lock);
	}
	seqmutex_unlock(&c->btree_trans_lock);
}
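
/*
 * Filesystem-level show/store: each sysfs file is handled either by one of
 * the sysfs_print*() helpers or by an explicit 'if (attr == &sysfs_<name>)'
 * branch.
 */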
SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor, c->minor);
	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);

	if (attr == &sysfs_flags)
		prt_bitflags(out, bch2_fs_flag_strs, c->flags);

	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_status)
		bch2_rebalance_status_to_text(out, c);

	sysfs_print(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, c);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_stripes_heap)
		bch2_stripes_heap_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_write_points)
		bch2_write_points_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
	if (attr == &sysfs_write_refs)
		bch2_write_refs_to_text(out, c);
#endif

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	return 0;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EPERM;

	/* Debugging: */

	if (!test_bit(BCH_FS_rw, &c->flags))
		return -EROFS;

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
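
		/* hand the request straight to the btree node cache shrinker: */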
		c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
	}

	if (attr == &sysfs_btree_wakeup)
		bch2_btree_wakeup_all(c);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test = strsep(&p, " \t\n");
		char *nr_str = strsep(&p, " \t\n");
		char *threads_str = strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);

		if (ret)
			size = ret;
	}
#endif
	return size;
}
SYSFS_OPS(bch2_fs);

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

/* counters dir */

SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

#define x(t, ...) \
	if (attr == &sysfs_##t) {					\
		counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		prt_printf(out, "since mount:");			\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter_since_mount);	\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:");		\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter);			\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}

STORE(bch2_fs_counters) {
	return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...) \
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_flags,
	&sysfs_journal_debug,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
	&sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
	&sysfs_write_refs,
#endif
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_prune_cache,
	&sysfs_btree_wakeup,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_status,
	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	NULL
};

/* options */

SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');

	return 0;
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
	kfree(tmp);

	if (ret < 0)
		goto err;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if (v &&
	    (id == Opt_background_target ||
	     id == Opt_background_compression ||
	     (id == Opt_compression && !c->opts.background_compression)))
		bch2_set_rebalance_needs_scan(c, 0);

	ret = size;
err:
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };
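
/*
 * Option files aren't statically declared: one file per OPT_FS option is
 * created at runtime from bch2_opt_table.
 */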
int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->flags & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}

/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)						\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};
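
/* Per-device allocation state dump backing the 'alloc_debug' file: */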
static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 8);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);

	bch2_dev_usage_to_text(out, &stats);

	prt_newline(out);

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_str(out, "freelist_wait");
	prt_tab(out);
	prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open buckets allocated");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_newline(out);

	prt_str(out, "open buckets this dev");
	prt_tab(out);
	prt_u64(out, ca->nr_open_buckets);
	prt_newline(out);

	prt_str(out, "open buckets total");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT);
	prt_newline(out);

	prt_str(out, "open_buckets_wait");
	prt_tab(out);
	prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open_buckets_btree");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_btree]);
	prt_newline(out);

	prt_str(out, "open_buckets_user");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_user]);
	prt_newline(out);

	prt_str(out, "buckets_to_invalidate");
	prt_tab(out);
	prt_u64(out, should_invalidate_buckets(ca, stats));
	prt_newline(out);

	prt_str(out, "btree reserve cache");
	prt_tab(out);
	prt_u64(out, c->btree_reserve_cache_nr);
	prt_newline(out);
}

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
				   bch2_data_type_str(i),
				   percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}
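
/*
 * Per-device kobject, one directory per member device (illustratively
 * /sys/fs/bcachefs/<uuid>/dev-<idx>/):
 */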
SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid, "%pU\n", ca->uuid.b);

	sysfs_print(bucket_size, bucket_bytes(ca));
	sysfs_print(first_bucket, ca->mi.first_bucket);
	sysfs_print(nbuckets, ca->mi.nbuckets);
	sysfs_print(durability, ca->mi.durability);
	sysfs_print(discard, ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group)
			bch2_disk_path_to_text(out, c, ca->mi.group - 1);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, __bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_state_rw) {
		prt_string_option(out, bch2_member_states, ca->mi.state);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_io_done)
		dev_io_done_to_text(out, ca);

	if (attr == &sysfs_io_errors)
		bch2_dev_io_errors_to_text(out, ca);

	sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ].stats);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE].stats);

	sysfs_printf(congested, "%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		dev_alloc_debug_to_text(out, ca);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_durability) {
		u64 v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
			SET_BCH_MEMBER_DURABILITY(mi, v + 1);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_io_errors_reset)
		bch2_dev_errors_reset(ca);

	return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_io_done,
	&sysfs_io_errors,
	&sysfs_io_errors_reset,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */