linux/fs/nfs/read.c
Thorsten Blum e343678ee9 nfs: Remove unnecessary NULL check before kfree()
Since kfree() already checks if its argument is NULL, an additional
check before calling kfree() is unnecessary and can be removed.

Remove it and thus also the following Coccinelle/coccicheck warning
reported by ifnullfree.cocci:

  WARNING: NULL check before some freeing functions is not needed

Reviewed-by: Benjamin Coddington <bcodding@redhat.com>
Signed-off-by: Thorsten Blum <thorsten.blum@toblux.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
2024-09-23 15:03:12 -04:00
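For context, the idiom this change removes looks like the following. This is an illustrative before/after sketch of the pattern flagged by ifnullfree.cocci, not the actual hunk; the likely site is nfs_readhdr_free() below, which now calls kfree() unconditionally:

        /* Before: redundant guard -- kfree() already handles NULL */
        if (rhdr->res.scratch != NULL)
                kfree(rhdr->res.scratch);

        /* After: kfree(NULL) is a no-op, so call it unconditionally */
        kfree(rhdr->res.scratch);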


// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "delegation.h"

#define NFSDBG_FACILITY NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
        struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

        if (p)
                p->rw_mode = FMODE_READ;
        return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
        /* kfree(NULL) is a no-op, so the scratch buffer needs no NULL check */
        kfree(rhdr->res.scratch);
        kmem_cache_free(nfs_rdata_cachep, rhdr);
}

static int nfs_return_empty_folio(struct folio *folio)
{
        folio_zero_segment(folio, 0, folio_size(folio));
        folio_mark_uptodate(folio);
        folio_unlock(folio);
        return 0;
}

void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                          struct inode *inode, bool force_mds,
                          const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
                        server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
        struct nfs_pgio_mirror *pgm;
        unsigned long npages;

        nfs_pageio_complete(pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio->pg_mirror_count != 1);

        pgm = &pgio->pg_mirrors[0];
        NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
        npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
        nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        struct nfs_pgio_mirror *mirror;

        if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
                pgio->pg_ops->pg_cleanup(pgio);

        pgio->pg_ops = &nfs_pgio_rw_ops;

        /* read path should never have more than one mirror */
        WARN_ON_ONCE(pgio->pg_mirror_count != 1);

        mirror = &pgio->pg_mirrors[0];
        mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
        WARN_ON(hdr->res.scratch != NULL);
        hdr->res.scratch = kmalloc(size, GFP_KERNEL);
        return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);

static void nfs_readpage_release(struct nfs_page *req, int error)
{
        struct folio *folio = nfs_page_to_folio(req);

        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
                if (nfs_netfs_folio_unlock(folio))
                        folio_unlock(folio);

        nfs_release_request(req);
}

static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
        if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
                folio_mark_uptodate(nfs_page_to_folio(req));
}

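/*
 * Completion handler for a read: for each request attached to @hdr,
 * zero any tail the server did not return (short read at EOF), mark
 * fully-read page groups uptodate, and record errors in the request's
 * open context before releasing the request.
 */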
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        int error;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct folio *folio = nfs_page_to_folio(req);
                unsigned long start = req->wb_pgbase;
                unsigned long end = req->wb_pgbase + req->wb_bytes;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        /* note: regions of the page not covered by a
                         * request are zeroed in nfs_read_add_folio
                         */
                        if (bytes > hdr->good_bytes) {
                                /* nothing in this request was good, so zero
                                 * the full extent of the request */
                                folio_zero_segment(folio, start, end);
                        } else if (hdr->good_bytes - bytes < req->wb_bytes) {
                                /* part of this request has good bytes, but
                                 * not all. zero the bad bytes */
                                start += hdr->good_bytes - bytes;
                                WARN_ON(start < req->wb_pgbase);
                                folio_zero_segment(folio, start, end);
                        }
                }
                error = 0;
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                nfs_page_group_set_uptodate(req);
                        else {
                                error = hdr->error;
                                xchg(&nfs_req_openctx(req)->error, error);
                        }
                } else
                        nfs_page_group_set_uptodate(req);
                nfs_list_remove_request(req);
                nfs_readpage_release(req, error);
        }
        nfs_netfs_read_completion(hdr);

out:
        hdr->release(hdr);
}

static void nfs_initiate_read(struct nfs_pgio_header *hdr,
                              struct rpc_message *msg,
                              const struct nfs_rpc_ops *rpc_ops,
                              struct rpc_task_setup *task_setup_data, int how)
{
        rpc_ops->read_setup(hdr, msg);
        nfs_netfs_initiate_read(hdr);
        trace_nfs_initiate_read(hdr);
}

static void
nfs_async_read_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req, error);
        }
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
        .error_cleanup = nfs_async_read_error,
        .completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
                             struct nfs_pgio_header *hdr,
                             struct inode *inode)
{
        int status = NFS_PROTO(inode)->read_done(task, hdr);

        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
        trace_nfs_readpage_done(task, hdr);

        if (task->tk_status == -ESTALE) {
                nfs_set_inode_stale(inode);
                nfs_mark_for_revalidate(inode);
        }
        return 0;
}

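/*
 * Handle a short read: fail the I/O if the server made no progress,
 * fall back to the MDS for non RPC-based layout drivers, or adjust the
 * arguments and restart the RPC for the remaining bytes.
 */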
static void nfs_readpage_retry(struct rpc_task *task,
                               struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res *resp = &hdr->res;

        /* This is a short read! */
        nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
        trace_nfs_readpage_short(task, hdr);

        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(hdr, -EIO, argp->offset);
                return;
        }

        /* For non rpc-based layout drivers, retry-through-MDS */
        if (!task->tk_ops) {
                hdr->pnfs_error = -EAGAIN;
                return;
        }

        /* Yes, so retry the read at the end of the hdr */
        hdr->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        resp->count = 0;
        resp->eof = 0;
        rpc_restart_call_prepare(task);
}

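/*
 * Per-RPC result handler: trim good_bytes when the server reports EOF
 * inside the request, otherwise treat a short reply as a retry case.
 */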
static void nfs_readpage_result(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
{
        if (hdr->res.eof) {
                loff_t pos = hdr->args.offset + hdr->res.count;
                unsigned int new = pos - hdr->io_start;

                if (hdr->good_bytes > new) {
                        hdr->good_bytes = new;
                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
                }
        } else if (hdr->res.count < hdr->args.count)
                nfs_readpage_retry(task, hdr);
}

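/*
 * Create a read request for @folio, zero the tail of the folio that
 * lies beyond the end of file, and queue the request on @pgio.
 */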
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
                       struct nfs_open_context *ctx,
                       struct folio *folio)
{
        struct inode *inode = folio->mapping->host;
        struct nfs_server *server = NFS_SERVER(inode);
        size_t fsize = folio_size(folio);
        unsigned int rsize = server->rsize;
        struct nfs_page *new;
        unsigned int len, aligned_len;
        int error;

        len = nfs_folio_length(folio);
        if (len == 0)
                return nfs_return_empty_folio(folio);

        aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

        new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
        if (IS_ERR(new)) {
                error = PTR_ERR(new);
                if (nfs_netfs_folio_unlock(folio))
                        folio_unlock(folio);
                goto out;
        }

        if (len < fsize)
                folio_zero_segment(folio, len, fsize);
        if (!nfs_pageio_add_request(pgio, new)) {
                nfs_list_remove_request(new);
                error = pgio->pg_error;
                nfs_readpage_release(new, error);
                goto out;
        }
        return 0;
out:
        return error;
}

/*
 * Actually read a folio over the wire.
 */
static int nfs_do_read_folio(struct file *file, struct folio *folio)
{
        struct inode *inode = file_inode(file);
        struct nfs_pageio_descriptor pgio;
        struct nfs_open_context *ctx;
        int ret;

        ctx = get_nfs_open_context(nfs_file_open_context(file));
        xchg(&ctx->error, 0);
        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);

        ret = nfs_read_add_folio(&pgio, ctx, folio);
        if (ret)
                goto out_put;

        nfs_pageio_complete_read(&pgio);
        nfs_update_delegated_atime(inode);
        if (pgio.pg_error < 0) {
                ret = pgio.pg_error;
                goto out_put;
        }

        ret = folio_wait_locked_killable(folio);
        if (!folio_test_uptodate(folio) && !ret)
                ret = xchg(&ctx->error, 0);

out_put:
        put_nfs_open_context(ctx);
        return ret;
}

/*
 * Synchronously read a folio.
 *
 * This is not heavily used, as most users first try an asynchronous
 * large read through ->readahead.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
        struct inode *inode = file_inode(file);
        loff_t pos = folio_pos(folio);
        size_t len = folio_size(folio);
        int ret;

        trace_nfs_aop_readpage(inode, pos, len);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        task_io_account_read(len);

        /*
         * Try to flush any pending writes to the file.
         *
         * NOTE! Because we own the folio lock, there cannot
         * be any new pending writes generated at this point
         * for this folio (other folios can be written to).
         */
        ret = nfs_wb_folio(inode, folio);
        if (ret)
                goto out_unlock;
        if (folio_test_uptodate(folio))
                goto out_unlock;

        ret = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        ret = nfs_netfs_read_folio(file, folio);
        if (ret)
                ret = nfs_do_read_folio(file, folio);
out:
        trace_nfs_aop_readpage_done(inode, pos, len, ret);
        return ret;
out_unlock:
        folio_unlock(folio);
        goto out;
}

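/*
 * ->readahead: try fscache via netfs first; if that is not available,
 * queue each folio in the readahead window for an asynchronous read
 * over the wire.
 */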
void nfs_readahead(struct readahead_control *ractl)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_open_context *ctx;
        unsigned int nr_pages = readahead_count(ractl);
        struct file *file = ractl->file;
        struct inode *inode = ractl->mapping->host;
        struct folio *folio;
        int ret;

        trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
        task_io_account_read(readahead_length(ractl));

        ret = -ESTALE;
        if (NFS_STALE(inode))
                goto out;

        ret = nfs_netfs_readahead(ractl);
        if (!ret)
                goto out;

        if (file == NULL) {
                ret = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);

        while ((folio = readahead_folio(ractl)) != NULL) {
                ret = nfs_read_add_folio(&pgio, ctx, folio);
                if (ret)
                        break;
        }

        nfs_pageio_complete_read(&pgio);
        nfs_update_delegated_atime(inode);

        put_nfs_open_context(ctx);
out:
        trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_pgio_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
        .rw_alloc_header = nfs_readhdr_alloc,
        .rw_free_header = nfs_readhdr_free,
        .rw_done = nfs_readpage_done,
        .rw_result = nfs_readpage_result,
        .rw_initiate = nfs_initiate_read,
};