// SPDX-License-Identifier: GPL-2.0-or-later
/* Netfs support statistics
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/seq_file.h>
#include "internal.h"
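/*
 * Event counters for the netfs library.  They are bumped as events occur in
 * the I/O paths and are dumped, along with the fscache statistics, through
 * netfs_stats_show() below.
 */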
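/* Read-helper counters. */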
atomic_t netfs_n_rh_dio_read;
atomic_t netfs_n_rh_readahead;
atomic_t netfs_n_rh_read_folio;
atomic_t netfs_n_rh_rreq;
atomic_t netfs_n_rh_sreq;
atomic_t netfs_n_rh_download;
atomic_t netfs_n_rh_download_done;
atomic_t netfs_n_rh_download_failed;
atomic_t netfs_n_rh_download_instead;
atomic_t netfs_n_rh_read;
atomic_t netfs_n_rh_read_done;
atomic_t netfs_n_rh_read_failed;
atomic_t netfs_n_rh_zero;
atomic_t netfs_n_rh_short_read;
atomic_t netfs_n_rh_write;
atomic_t netfs_n_rh_write_begin;
atomic_t netfs_n_rh_write_done;
atomic_t netfs_n_rh_write_failed;
atomic_t netfs_n_rh_write_zskip;
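/* Write-helper counters. */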
atomic_t netfs_n_wh_buffered_write;
atomic_t netfs_n_wh_writethrough;
atomic_t netfs_n_wh_dio_write;
atomic_t netfs_n_wh_writepages;
atomic_t netfs_n_wh_wstream_conflict;
atomic_t netfs_n_wh_upload;
atomic_t netfs_n_wh_upload_done;
atomic_t netfs_n_wh_upload_failed;
atomic_t netfs_n_wh_write;
atomic_t netfs_n_wh_write_done;
atomic_t netfs_n_wh_write_failed;
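/*
 * netfs_stats_show - dump the netfs event counters through a seq_file
 *
 * Each output line groups related counters: the tags in the format string
 * (DR, RA, RF, ...) correspond, in order, to the atomic_read() arguments of
 * the same call.  The fscache counters are appended by chaining to
 * fscache_stats_show().
 *
 * The function has the seq_file single-show signature, so it could, for
 * example, be exposed with something like the call below (illustrative
 * only; the real registration is done elsewhere):
 *
 *	proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
 *			   netfs_stats_show);
 */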
int netfs_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Netfs : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
		   atomic_read(&netfs_n_rh_dio_read),
		   atomic_read(&netfs_n_rh_readahead),
		   atomic_read(&netfs_n_rh_read_folio),
		   atomic_read(&netfs_n_rh_write_begin),
		   atomic_read(&netfs_n_rh_write_zskip));
	seq_printf(m, "Netfs : BW=%u WT=%u DW=%u WP=%u\n",
		   atomic_read(&netfs_n_wh_buffered_write),
		   atomic_read(&netfs_n_wh_writethrough),
		   atomic_read(&netfs_n_wh_dio_write),
		   atomic_read(&netfs_n_wh_writepages));
	seq_printf(m, "Netfs : ZR=%u sh=%u sk=%u\n",
		   atomic_read(&netfs_n_rh_zero),
		   atomic_read(&netfs_n_rh_short_read),
		   atomic_read(&netfs_n_rh_write_zskip));
	seq_printf(m, "Netfs : DL=%u ds=%u df=%u di=%u\n",
		   atomic_read(&netfs_n_rh_download),
		   atomic_read(&netfs_n_rh_download_done),
		   atomic_read(&netfs_n_rh_download_failed),
		   atomic_read(&netfs_n_rh_download_instead));
	seq_printf(m, "Netfs : RD=%u rs=%u rf=%u\n",
		   atomic_read(&netfs_n_rh_read),
		   atomic_read(&netfs_n_rh_read_done),
		   atomic_read(&netfs_n_rh_read_failed));
	seq_printf(m, "Netfs : UL=%u us=%u uf=%u\n",
		   atomic_read(&netfs_n_wh_upload),
		   atomic_read(&netfs_n_wh_upload_done),
		   atomic_read(&netfs_n_wh_upload_failed));
	seq_printf(m, "Netfs : WR=%u ws=%u wf=%u\n",
		   atomic_read(&netfs_n_wh_write),
		   atomic_read(&netfs_n_wh_write_done),
		   atomic_read(&netfs_n_wh_write_failed));
	seq_printf(m, "Netfs : rr=%u sr=%u wsc=%u\n",
		   atomic_read(&netfs_n_rh_rreq),
		   atomic_read(&netfs_n_rh_sreq),
		   atomic_read(&netfs_n_wh_wstream_conflict));
	return fscache_stats_show(m);
}
EXPORT_SYMBOL(netfs_stats_show);