mirror of https://mirrors.bfsu.edu.cn/git/linux.git
commit 000dbe0bec
Convert the NFS buffered read code paths to the corresponding netfs APIs, but only when fscache is configured and enabled.

The netfs API defines struct netfs_request_ops, which must be filled in by the network filesystem. For NFS, we only need to define 5 of the functions, the main one being issue_read(). The netfs layer calls issue_read() when a read cannot be fulfilled locally and must be sent to the server (either the cache is not active, or it is active but the data is not available). Once the read from the server is complete, netfs requires a call to netfs_subreq_terminated(), which conveys either how many bytes were read successfully or an error. Note that issue_read() is called with a struct netfs_io_subrequest, which defines the IO requested with a start and a length (both in bytes), and assumes the underlying netfs will return either an error for the whole region or the number of bytes successfully read.

The NFS IO path is page based, and the main APIs are the pgio APIs defined in pagelist.c. With the pgio APIs there is no way for the caller to know how many RPCs will be sent or how the pages will be broken up into underlying RPCs, each of which has its own completion and return code. In contrast, netfs is subrequest based: a single subrequest may contain multiple pages, and a single subrequest is initiated with issue_read() and terminated with netfs_subreq_terminated(). Thus, to utilize the netfs APIs, NFS needs a way to accommodate the netfs requirement of a single response to the whole subrequest while minimizing disruptive changes to the NFS pgio layer.

The approach taken with this patch is to allocate a small structure for each nfs_netfs_issue_read() call, store the final error and the number of bytes successfully transferred in that structure, and update these values as each RPC completes. The refcount on the structure doubles as a marker for the last RPC completion: it is incremented in nfs_netfs_initiate_read() and decremented in nfs_netfs_read_completion() when an nfs_pgio_header contains a valid pointer to the data. On the final put (which signals that the last outstanding RPC is complete), nfs_netfs_read_completion() calls netfs_subreq_terminated() with either the final error value (if one or more READs completed with an error) or the number of bytes successfully transferred (if all RPCs completed successfully). Note that when all RPCs complete successfully, the number of bytes transferred is capped to the length of the subrequest; this cap prevents "Subreq overread" warnings from netfs, which would otherwise be triggered by the "aligned_len" in nfs_pageio_add_page() in the corner case where NFS requests a full page at the end of the file even when i_size reflects only a partial page (NFS overread).

Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
Tested-by: Daire Byrne <daire@dneg.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
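The refcounted accounting described in the commit message maps onto the struct nfs_netfs_io_data helpers declared in the header below. As a rough illustration only, the following sketch shows how the issue/complete lifecycle could look; the function names sketch_issue_read() and sketch_rpc_done() are hypothetical, the pgio plumbing is elided, and this is not the actual fs/nfs/fscache.c code (it additionally assumes <linux/slab.h> for kzalloc()).

/*
 * Hedged sketch, not the real implementation: one nfs_netfs_io_data is
 * allocated per netfs_io_subrequest, every RPC sent takes a reference,
 * and the final nfs_netfs_put() reports the aggregate result to netfs.
 */
static void sketch_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;

	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL);
	if (!netfs) {
		netfs_subreq_terminated(sreq, -ENOMEM, false);
		return;
	}
	refcount_set(&netfs->refcount, 1);	/* issuer's reference */
	netfs->sreq = sreq;

	/*
	 * Drive the NFS pgio machinery over [sreq->start, sreq->start + sreq->len).
	 * Each RPC that gets sent takes its own reference via nfs_netfs_get()
	 * (the role of nfs_netfs_initiate_read()) and drops it on completion.
	 */
	/* ... nfs_pageio_* calls elided ... */

	/* Drop the issuer's reference; the last put terminates the subrequest. */
	nfs_netfs_put(netfs);
}

/* Per-RPC completion: fold this RPC's result into the subrequest totals. */
static void sketch_rpc_done(struct nfs_netfs_io_data *netfs,
			    int rpc_error, size_t rpc_bytes)
{
	if (rpc_error)
		netfs->error = rpc_error;	/* any failed READ fails the subreq */
	else
		atomic64_add(rpc_bytes, &netfs->transferred);

	/*
	 * Pairs with the nfs_netfs_get() taken when the RPC was sent; on the
	 * final put, nfs_netfs_put() caps ->transferred to sreq->len and calls
	 * netfs_subreq_terminated() exactly once.
	 */
	nfs_netfs_put(netfs);
}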
203 lines · 6.5 KiB · C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* NFS filesystem cache interface definitions
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#ifndef _NFS_FSCACHE_H
#define _NFS_FSCACHE_H

#include <linux/swap.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/fscache.h>
#include <linux/iversion.h>

#ifdef CONFIG_NFS_FSCACHE

/*
 * Definition of the auxiliary data attached to NFS inode storage objects
 * within the cache.
 *
 * The contents of this struct are recorded in the on-disk local cache in the
 * auxiliary data attached to the data storage object backing an inode. This
 * permits coherency to be managed when a new inode binds to an already extant
 * cache object.
 */
struct nfs_fscache_inode_auxdata {
	s64 mtime_sec;
	s64 mtime_nsec;
	s64 ctime_sec;
	s64 ctime_nsec;
	u64 change_attr;
};

struct nfs_netfs_io_data {
	/*
	 * NFS may split a netfs_io_subrequest into multiple RPCs, each
	 * with their own read completion. In netfs, we can only call
	 * netfs_subreq_terminated() once for each subrequest. Use the
	 * refcount here to double as a marker of the last RPC completion,
	 * and only call netfs via netfs_subreq_terminated() once.
	 */
	refcount_t refcount;
	struct netfs_io_subrequest *sreq;

	/*
	 * Final disposition of the netfs_io_subrequest, sent in
	 * netfs_subreq_terminated()
	 */
	atomic64_t transferred;
	int error;
};

static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
{
	refcount_inc(&netfs->refcount);
}

static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
{
	ssize_t final_len;

	/* Only the last RPC completion should call netfs_subreq_terminated() */
	if (!refcount_dec_and_test(&netfs->refcount))
		return;

	/*
	 * The NFS pageio interface may read a complete page, even when netfs
	 * only asked for a partial page. Specifically, this may be seen when
	 * one thread is truncating a file while another one is reading the last
	 * page of the file.
	 * Correct the final length here to be no larger than the netfs subrequest
	 * length, and thus avoid netfs's "Subreq overread" warning message.
	 */
	final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
	netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
	kfree(netfs);
}
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
	netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops);
}
extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
extern int nfs_netfs_folio_unlock(struct folio *folio);

/*
 * fscache.c
 */
extern int nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
extern void nfs_fscache_release_super_cookie(struct super_block *);

extern void nfs_fscache_init_inode(struct inode *);
extern void nfs_fscache_clear_inode(struct inode *);
extern void nfs_fscache_open_file(struct inode *, struct file *);
extern void nfs_fscache_release_file(struct inode *, struct file *);
extern int nfs_netfs_readahead(struct readahead_control *ractl);
extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);

static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
	fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
	return true;
}

static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
					      struct inode *inode)
{
	memset(auxdata, 0, sizeof(*auxdata));
	auxdata->mtime_sec = inode->i_mtime.tv_sec;
	auxdata->mtime_nsec = inode->i_mtime.tv_nsec;
	auxdata->ctime_sec = inode->i_ctime.tv_sec;
	auxdata->ctime_nsec = inode->i_ctime.tv_nsec;

	if (NFS_SERVER(inode)->nfs_client->rpc_ops->version == 4)
		auxdata->change_attr = inode_peek_iversion_raw(inode);
}

/*
 * Invalidate the contents of fscache for this inode. This will not sleep.
 */
static inline void nfs_fscache_invalidate(struct inode *inode, int flags)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);

	nfs_fscache_update_auxdata(&auxdata, inode);
	fscache_invalidate(cookie, &auxdata, i_size_read(inode), flags);
}

/*
 * indicate the client caching state as readable text
 */
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
	if (server->fscache)
		return "yes";
	return "no ";
}

static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
					     struct nfs_pageio_descriptor *desc)
{
	hdr->netfs = desc->pg_netfs;
}
static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
						   struct nfs_pgio_header *hdr)
{
	desc->pg_netfs = hdr->netfs;
}
static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc)
{
	desc->pg_netfs = NULL;
}
#else /* CONFIG_NFS_FSCACHE */
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) {}
static inline void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr) {}
static inline void nfs_netfs_read_completion(struct nfs_pgio_header *hdr) {}
static inline int nfs_netfs_folio_unlock(struct folio *folio)
{
	return 1;
}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}

static inline void nfs_fscache_init_inode(struct inode *inode) {}
static inline void nfs_fscache_clear_inode(struct inode *inode) {}
static inline void nfs_fscache_open_file(struct inode *inode,
					 struct file *filp) {}
static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
static inline int nfs_netfs_readahead(struct readahead_control *ractl)
{
	return -ENOBUFS;
}
static inline int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	return -ENOBUFS;
}

static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
	return true; /* may release folio */
}
static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}

static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
	return "no ";
}
static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
					     struct nfs_pageio_descriptor *desc) {}
static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
						   struct nfs_pgio_header *hdr) {}
static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc) {}
#endif /* CONFIG_NFS_FSCACHE */
#endif /* _NFS_FSCACHE_H */
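As a usage note, the stubs in the #else branch above let call sites invoke these helpers unconditionally, with no #ifdef CONFIG_NFS_FSCACHE at the call site. The following is a hedged sketch of a hypothetical caller, not code taken from fs/nfs:

/* Illustrative only: shows how the -ENOBUFS stubs enable config-agnostic callers. */
static int example_read_folio(struct file *file, struct folio *folio)
{
	int ret;

	/*
	 * With CONFIG_NFS_FSCACHE=y this may route the read through netfs;
	 * with it disabled, the stub returns -ENOBUFS and the caller simply
	 * falls through to the ordinary NFS read path.
	 */
	ret = nfs_netfs_read_folio(file, folio);
	if (!ret)
		return 0;

	/* ... regular (non-netfs) NFS read path elided ... */
	return ret;
}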