// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"

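/* Global XFS statistics; the per-cpu counters hang off xfsstats.xs_stats. */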
struct xstats xfsstats;
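
/*
 * Sum the idx'th 32-bit counter of the per-cpu stats across all
 * possible CPUs.
 */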
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}
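
/*
 * Render each statistics group into @buf as "name v0 v1 ..." (e.g.
 * "abt 12 34 56 78", values illustrative), one group per line, followed
 * by the 64-bit xpc byte counters, the defer_relog count and the debug
 * flag.  Returns the number of bytes written to @buf.
 */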
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int i, j;
	int len = 0;
	uint64_t xs_xstrat_bytes = 0;
	uint64_t xs_write_bytes = 0;
	uint64_t xs_read_bytes = 0;
	uint64_t defer_relog = 0;

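	/*
	 * Each entry names a stats group and the offset just past that
	 * group's last counter; the loop below advances j from one
	 * endpoint to the next.
	 */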
	static const struct xstats_entry {
		char *desc;
		int endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(vn_active)	},
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		defer_relog += per_cpu_ptr(stats, i)->s.defer_relog;
	}

	len += scnprintf(buf + len, PATH_MAX - len, "xpc %llu %llu %llu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX - len, "defer_relog %llu\n",
			defer_relog);
	len += scnprintf(buf + len, PATH_MAX - len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif

	return len;
}
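
/*
 * Zero every per-cpu counter except vn_active: it tracks currently live
 * vnodes rather than accumulated events, so it must survive a clear.
 */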
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int c;
	uint32_t vn_active;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
		preempt_enable();
	}
}

#ifdef CONFIG_PROC_FS
/* legacy quota interfaces */
#ifdef CONFIG_XFS_QUOTA

#define XFSSTAT_START_XQMSTAT	xfsstats_offset(xs_qm_dqreclaims)
#define XFSSTAT_END_XQMSTAT	xfsstats_offset(xs_qm_dquot)
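
/*
 * /proc/fs/xfs/xqm: dquot cache statistics.  The first and third
 * columns are hardwired to zero; only "incore" and "freelist" remain.
 */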
static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

/* legacy quota stats interface no 2 */
static int xqmstat_proc_show(struct seq_file *m, void *v)
{
	int j;

	seq_puts(m, "qm");
	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_XFS_QUOTA */
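
/*
 * Build the legacy /proc/fs/xfs tree.  "stat" is just a symlink to the
 * sysfs statistics file; any partial failure removes the whole subtree.
 */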
int
xfs_init_procfs(void)
{
	if (!proc_mkdir("fs/xfs", NULL))
		return -ENOMEM;

	if (!proc_symlink("fs/xfs/stat", NULL,
			  "/sys/fs/xfs/stats/stats"))
		goto out;

#ifdef CONFIG_XFS_QUOTA
	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
		goto out;
	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
		goto out;
#endif
	return 0;

out:
	remove_proc_subtree("fs/xfs", NULL);
	return -ENOMEM;
}

void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
#endif /* CONFIG_PROC_FS */