// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs iostat support
 *
 * Copyright 2021 Google LLC
 * Author: Daeho Jeong <daehojeong@google.com>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *bio_iostat_ctx_cache;
static mempool_t *bio_iostat_ctx_pool;
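
/* Average number of bytes per counted request of @type, or 0 if none yet. */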
static inline unsigned long long iostat_get_avg_bytes(struct f2fs_sb_info *sbi,
					enum iostat_type type)
{
	return sbi->iostat_count[type] ? div64_u64(sbi->iostat_bytes[type],
					sbi->iostat_count[type]) : 0;
}

#define IOSTAT_INFO_SHOW(name, type)					\
	seq_printf(seq, "%-23s %-16llu %-16llu %-16llu\n",		\
			name":", sbi->iostat_bytes[type],		\
			sbi->iostat_count[type],			\
			iostat_get_avg_bytes(sbi, type))
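
/*
 * seq_file show callback: dump the cumulative byte totals, request counts and
 * average request size for every iostat type, grouped into application and
 * filesystem writes, reads and other I/O. Prints nothing while iostat is
 * disabled.
 */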
int __maybe_unused iostat_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (!sbi->iostat_enable)
		return 0;

	seq_printf(seq, "time: %-16llu\n", ktime_get_real_seconds());
	seq_printf(seq, "\t\t\t%-16s %-16s %-16s\n",
				"io_bytes", "count", "avg_bytes");

	/* print app write IOs */
	seq_puts(seq, "[WRITE]\n");
	IOSTAT_INFO_SHOW("app buffered data", APP_BUFFERED_IO);
	IOSTAT_INFO_SHOW("app direct data", APP_DIRECT_IO);
	IOSTAT_INFO_SHOW("app mapped data", APP_MAPPED_IO);
	IOSTAT_INFO_SHOW("app buffered cdata", APP_BUFFERED_CDATA_IO);
	IOSTAT_INFO_SHOW("app mapped cdata", APP_MAPPED_CDATA_IO);

	/* print fs write IOs */
	IOSTAT_INFO_SHOW("fs data", FS_DATA_IO);
	IOSTAT_INFO_SHOW("fs cdata", FS_CDATA_IO);
	IOSTAT_INFO_SHOW("fs node", FS_NODE_IO);
	IOSTAT_INFO_SHOW("fs meta", FS_META_IO);
	IOSTAT_INFO_SHOW("fs gc data", FS_GC_DATA_IO);
	IOSTAT_INFO_SHOW("fs gc node", FS_GC_NODE_IO);
	IOSTAT_INFO_SHOW("fs cp data", FS_CP_DATA_IO);
	IOSTAT_INFO_SHOW("fs cp node", FS_CP_NODE_IO);
	IOSTAT_INFO_SHOW("fs cp meta", FS_CP_META_IO);

	/* print app read IOs */
	seq_puts(seq, "[READ]\n");
	IOSTAT_INFO_SHOW("app buffered data", APP_BUFFERED_READ_IO);
	IOSTAT_INFO_SHOW("app direct data", APP_DIRECT_READ_IO);
	IOSTAT_INFO_SHOW("app mapped data", APP_MAPPED_READ_IO);
	IOSTAT_INFO_SHOW("app buffered cdata", APP_BUFFERED_CDATA_READ_IO);
	IOSTAT_INFO_SHOW("app mapped cdata", APP_MAPPED_CDATA_READ_IO);

	/* print fs read IOs */
	IOSTAT_INFO_SHOW("fs data", FS_DATA_READ_IO);
	IOSTAT_INFO_SHOW("fs gc data", FS_GDATA_READ_IO);
	IOSTAT_INFO_SHOW("fs cdata", FS_CDATA_READ_IO);
	IOSTAT_INFO_SHOW("fs node", FS_NODE_READ_IO);
	IOSTAT_INFO_SHOW("fs meta", FS_META_READ_IO);

	/* print other IOs */
	seq_puts(seq, "[OTHER]\n");
	IOSTAT_INFO_SHOW("fs discard", FS_DISCARD_IO);
	IOSTAT_INFO_SHOW("fs flush", FS_FLUSH_IO);
	IOSTAT_INFO_SHOW("fs zone reset", FS_ZONE_RESET_IO);

	return 0;
}
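
/*
 * Snapshot the accumulated latency statistics (peak, average and bio count
 * per latency type and page type), reset the counters for the next period,
 * and emit the snapshot through the f2fs_iostat_latency tracepoint.
 */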
static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
{
	int io, idx;
	struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	unsigned long flags;

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	for (idx = 0; idx < MAX_IO_TYPE; idx++) {
		for (io = 0; io < NR_PAGE_TYPE; io++) {
			iostat_lat[idx][io].peak_lat =
				jiffies_to_msecs(io_lat->peak_lat[idx][io]);
			iostat_lat[idx][io].cnt = io_lat->bio_cnt[idx][io];
			iostat_lat[idx][io].avg_lat = iostat_lat[idx][io].cnt ?
				jiffies_to_msecs(io_lat->sum_lat[idx][io]) / iostat_lat[idx][io].cnt : 0;
			io_lat->sum_lat[idx][io] = 0;
			io_lat->peak_lat[idx][io] = 0;
			io_lat->bio_cnt[idx][io] = 0;
		}
	}
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);

	trace_f2fs_iostat_latency(sbi, iostat_lat);
}
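
/*
 * Fire the periodic iostat tracepoints at most once per iostat_period_ms:
 * report the per-type byte deltas since the previous period via f2fs_iostat,
 * then flush the latency statistics via __record_iostat_latency().
 */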
static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
	unsigned long long iostat_diff[NR_IO_TYPE];
	int i;
	unsigned long flags;

	if (time_is_after_jiffies(sbi->iostat_next_period))
		return;

	/* Need double check under the lock */
	spin_lock_irqsave(&sbi->iostat_lock, flags);
	if (time_is_after_jiffies(sbi->iostat_next_period)) {
		spin_unlock_irqrestore(&sbi->iostat_lock, flags);
		return;
	}

	sbi->iostat_next_period = jiffies +
		msecs_to_jiffies(sbi->iostat_period_ms);

	for (i = 0; i < NR_IO_TYPE; i++) {
		iostat_diff[i] = sbi->iostat_bytes[i] -
				sbi->prev_iostat_bytes[i];
		sbi->prev_iostat_bytes[i] = sbi->iostat_bytes[i];
	}
	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	trace_f2fs_iostat(sbi, iostat_diff);

	__record_iostat_latency(sbi);
}
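
/* Clear every byte/count counter and the whole latency table. */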
void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	int i;

	spin_lock_irq(&sbi->iostat_lock);
	for (i = 0; i < NR_IO_TYPE; i++) {
		sbi->iostat_count[i] = 0;
		sbi->iostat_bytes[i] = 0;
		sbi->prev_iostat_bytes[i] = 0;
	}
	spin_unlock_irq(&sbi->iostat_lock);

	spin_lock_irq(&sbi->iostat_lat_lock);
	memset(io_lat, 0, sizeof(struct iostat_lat_info));
	spin_unlock_irq(&sbi->iostat_lat_lock);
}
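
/* Account @io_bytes against @type; the caller holds sbi->iostat_lock. */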
static inline void __f2fs_update_iostat(struct f2fs_sb_info *sbi,
			enum iostat_type type, unsigned long long io_bytes)
{
	sbi->iostat_bytes[type] += io_bytes;
	sbi->iostat_count[type]++;
}
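
/*
 * Account @io_bytes of I/O against @type. The aggregate APP_WRITE_IO and
 * APP_READ_IO counters and, for compressed inodes, the matching *_CDATA_*
 * counters are updated as well, then the periodic tracepoint emission is
 * given a chance to fire.
 */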
void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
			enum iostat_type type, unsigned long long io_bytes)
{
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	spin_lock_irqsave(&sbi->iostat_lock, flags);
	__f2fs_update_iostat(sbi, type, io_bytes);

	if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
		__f2fs_update_iostat(sbi, APP_WRITE_IO, io_bytes);

	if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
		__f2fs_update_iostat(sbi, APP_READ_IO, io_bytes);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (inode && f2fs_compressed_file(inode)) {
		if (type == APP_BUFFERED_IO)
			__f2fs_update_iostat(sbi, APP_BUFFERED_CDATA_IO, io_bytes);

		if (type == APP_BUFFERED_READ_IO)
			__f2fs_update_iostat(sbi, APP_BUFFERED_CDATA_READ_IO, io_bytes);

		if (type == APP_MAPPED_READ_IO)
			__f2fs_update_iostat(sbi, APP_MAPPED_CDATA_READ_IO, io_bytes);

		if (type == APP_MAPPED_IO)
			__f2fs_update_iostat(sbi, APP_MAPPED_CDATA_IO, io_bytes);

		if (type == FS_DATA_READ_IO)
			__f2fs_update_iostat(sbi, FS_CDATA_READ_IO, io_bytes);

		if (type == FS_DATA_IO)
			__f2fs_update_iostat(sbi, FS_CDATA_IO, io_bytes);
	}
#endif

	spin_unlock_irqrestore(&sbi->iostat_lock, flags);

	f2fs_record_iostat(sbi);
}
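
/*
 * Fold one bio's completion latency (jiffies since submit_ts) into the sum,
 * count and peak for its latency type and page type. META_FLUSH bios are
 * accounted as META; out-of-range page types are warned about and dropped.
 */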
static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
				enum iostat_lat_type lat_type)
{
	unsigned long ts_diff;
	unsigned int page_type = iostat_ctx->type;
	struct f2fs_sb_info *sbi = iostat_ctx->sbi;
	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
	unsigned long flags;

	if (!sbi->iostat_enable)
		return;

	ts_diff = jiffies - iostat_ctx->submit_ts;
	if (page_type == META_FLUSH) {
		page_type = META;
	} else if (page_type >= NR_PAGE_TYPE) {
		f2fs_warn(sbi, "%s: %d over NR_PAGE_TYPE", __func__, page_type);
		return;
	}

	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
	io_lat->sum_lat[lat_type][page_type] += ts_diff;
	io_lat->bio_cnt[lat_type][page_type]++;
	if (ts_diff > io_lat->peak_lat[lat_type][page_type])
		io_lat->peak_lat[lat_type][page_type] = ts_diff;
	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}
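
/*
 * Bio completion path: classify the bio as READ_IO, WRITE_SYNC_IO or
 * WRITE_ASYNC_IO, point ->bi_private back at the sbi (writes) or the
 * post-read context (reads), record the latency and free the context.
 */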
void iostat_update_and_unbind_ctx(struct bio *bio)
{
	struct bio_iostat_ctx *iostat_ctx = bio->bi_private;
	enum iostat_lat_type lat_type;

	if (op_is_write(bio_op(bio))) {
		lat_type = bio->bi_opf & REQ_SYNC ?
				WRITE_SYNC_IO : WRITE_ASYNC_IO;
		bio->bi_private = iostat_ctx->sbi;
	} else {
		lat_type = READ_IO;
		bio->bi_private = iostat_ctx->post_read_ctx;
	}

	__update_iostat_latency(iostat_ctx, lat_type);
	mempool_free(iostat_ctx, bio_iostat_ctx_pool);
}
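
/*
 * Wrap ->bi_private with a bio_iostat_ctx so the completion handler can
 * compute the bio's latency; submit_ts and type start out cleared and the
 * post-read context is kept in ->post_read_ctx.
 */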
void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
		struct bio *bio, struct bio_post_read_ctx *ctx)
{
	struct bio_iostat_ctx *iostat_ctx;
	/* Due to the mempool, this never fails. */
	iostat_ctx = mempool_alloc(bio_iostat_ctx_pool, GFP_NOFS);
	iostat_ctx->sbi = sbi;
	iostat_ctx->submit_ts = 0;
	iostat_ctx->type = 0;
	iostat_ctx->post_read_ctx = ctx;
	bio->bi_private = iostat_ctx;
}
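
/* Create the slab cache and mempool used for per-bio iostat contexts. */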
int __init f2fs_init_iostat_processing(void)
{
	bio_iostat_ctx_cache =
		kmem_cache_create("f2fs_bio_iostat_ctx",
		sizeof(struct bio_iostat_ctx), 0, 0, NULL);
	if (!bio_iostat_ctx_cache)
		goto fail;
	bio_iostat_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_IOSTAT_CTXS,
			bio_iostat_ctx_cache);
	if (!bio_iostat_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_iostat_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_iostat_processing(void)
{
	mempool_destroy(bio_iostat_ctx_pool);
	kmem_cache_destroy(bio_iostat_ctx_cache);
}
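
/*
 * Per-superblock setup: initialise the iostat locks, leave iostat disabled
 * with the default sampling period, and allocate the latency table.
 */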
int f2fs_init_iostat(struct f2fs_sb_info *sbi)
{
	/* init iostat info */
	spin_lock_init(&sbi->iostat_lock);
	spin_lock_init(&sbi->iostat_lat_lock);
	sbi->iostat_enable = false;
	sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
	sbi->iostat_io_lat = f2fs_kzalloc(sbi, sizeof(struct iostat_lat_info),
					GFP_KERNEL);
	if (!sbi->iostat_io_lat)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_iostat(struct f2fs_sb_info *sbi)
{
	kfree(sbi->iostat_io_lat);
}