NFS Client Updates for Linux 6.1
Merge tag 'nfs-for-6.1-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:

 "New Features:
   - Add NFSv4.2 xattr tracepoints
   - Replace xprtiod WQ in rpcrdma
   - Flexfiles cancels I/O on layout recall or revoke

  Bugfixes and Cleanups:
   - Directly use ida_alloc() / ida_free()
   - Don't open-code max_t()
   - Prefer using strscpy over strlcpy
   - Remove unused forward declarations
   - Always return layout stats on flexfiles layout return
   - Have LISTXATTR treat NFS4ERR_NOXATTR as an empty reply instead of an error
   - Allow more xprtrdma memory allocations to fail without triggering a reclaim
   - Various other xprtrdma cleanups
   - Fix rpc_killall_tasks() races"

* tag 'nfs-for-6.1-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (27 commits)
  NFSv4/flexfiles: Cancel I/O if the layout is recalled or revoked
  SUNRPC: Add API to force the client to disconnect
  SUNRPC: Add a helper to allow pNFS drivers to selectively cancel RPC calls
  SUNRPC: Fix races with rpc_killall_tasks()
  xprtrdma: Fix uninitialized variable
  xprtrdma: Prevent memory allocations from driving a reclaim
  xprtrdma: Memory allocation should be allowed to fail during connect
  xprtrdma: MR-related memory allocation should be allowed to fail
  xprtrdma: Clean up synopsis of rpcrdma_regbuf_alloc()
  xprtrdma: Clean up synopsis of rpcrdma_req_create()
  svcrdma: Clean up RPCRDMA_DEF_GFP
  SUNRPC: Replace the use of the xprtiod WQ in rpcrdma
  NFSv4.2: Add a tracepoint for listxattr
  NFSv4.2: Add tracepoints for getxattr, setxattr, and removexattr
  NFSv4.2: Move TRACE_DEFINE_ENUM(NFS4_CONTENT_*) under CONFIG_NFS_V4_2
  NFSv4.2: Add special handling for LISTXATTR receiving NFS4ERR_NOXATTR
  nfs: remove nfs_wait_atomic_killable() and nfs_write_prepare() declaration
  NFSv4: remove nfs4_renewd_prepare_shutdown() declaration
  fs/nfs/pnfs_nfs.c: fix spelling typo and syntax error in comment
  NFSv4/pNFS: Always return layout stats on layout return for flexfiles
  ...
commit 66b8345585
@@ -656,9 +656,9 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 			goto out;
 	}
 	if (mntflags & NFS_MOUNT_WRITE_WAIT) {
-		result = filemap_fdatawait_range(file->f_mapping,
-				iocb->ki_pos - written,
-				iocb->ki_pos - 1);
+		filemap_fdatawait_range(file->f_mapping,
+				iocb->ki_pos - written,
+				iocb->ki_pos - 1);
 	}
 	result = generic_write_sync(iocb, written);
 	if (result < 0)
@@ -30,14 +30,20 @@
 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
 #define FF_LAYOUTRETURN_MAXERR 20
 
+enum nfs4_ff_op_type {
+	NFS4_FF_OP_LAYOUTSTATS,
+	NFS4_FF_OP_LAYOUTRETURN,
+};
+
 static unsigned short io_maxretrans;
 
 static const struct pnfs_commit_ops ff_layout_commit_ops;
 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
 		struct nfs_pgio_header *hdr);
-static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
-		struct nfs42_layoutstat_devinfo *devinfo,
-		int dev_limit);
+static int
+ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+			       struct nfs42_layoutstat_devinfo *devinfo,
+			       int dev_limit, enum nfs4_ff_op_type type);
 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
 		const struct nfs42_layoutstat_devinfo *devinfo,
 		struct nfs4_ff_layout_mirror *mirror);
@@ -1373,6 +1379,11 @@ static int ff_layout_read_prepare_common(struct rpc_task *task,
 		return -EIO;
 	}
 
+	if (!pnfs_is_valid_lseg(hdr->lseg)) {
+		rpc_exit(task, -EAGAIN);
+		return -EAGAIN;
+	}
+
 	ff_layout_read_record_layoutstats_start(task, hdr);
 	return 0;
 }
@@ -1553,6 +1564,11 @@ static int ff_layout_write_prepare_common(struct rpc_task *task,
 		return -EIO;
 	}
 
+	if (!pnfs_is_valid_lseg(hdr->lseg)) {
+		rpc_exit(task, -EAGAIN);
+		return -EAGAIN;
+	}
+
 	ff_layout_write_record_layoutstats_start(task, hdr);
 	return 0;
 }
@@ -1645,15 +1661,23 @@ static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
 		set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
 }
 
-static void ff_layout_commit_prepare_common(struct rpc_task *task,
-					    struct nfs_commit_data *cdata)
+static int ff_layout_commit_prepare_common(struct rpc_task *task,
+					   struct nfs_commit_data *cdata)
 {
+	if (!pnfs_is_valid_lseg(cdata->lseg)) {
+		rpc_exit(task, -EAGAIN);
+		return -EAGAIN;
+	}
+
 	ff_layout_commit_record_layoutstats_start(task, cdata);
+	return 0;
 }
 
 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
 {
-	ff_layout_commit_prepare_common(task, data);
+	if (ff_layout_commit_prepare_common(task, data))
+		return;
+
 	rpc_call_start(task);
 }
 
@@ -1949,6 +1973,65 @@ ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
 					    ff_layout_initiate_commit);
 }
 
+static bool ff_layout_match_rw(const struct rpc_task *task,
+			       const struct nfs_pgio_header *hdr,
+			       const struct pnfs_layout_segment *lseg)
+{
+	return hdr->lseg == lseg;
+}
+
+static bool ff_layout_match_commit(const struct rpc_task *task,
+				   const struct nfs_commit_data *cdata,
+				   const struct pnfs_layout_segment *lseg)
+{
+	return cdata->lseg == lseg;
+}
+
+static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
+{
+	const struct rpc_call_ops *ops = task->tk_ops;
+
+	if (ops == &ff_layout_read_call_ops_v3 ||
+	    ops == &ff_layout_read_call_ops_v4 ||
+	    ops == &ff_layout_write_call_ops_v3 ||
+	    ops == &ff_layout_write_call_ops_v4)
+		return ff_layout_match_rw(task, task->tk_calldata, data);
+	if (ops == &ff_layout_commit_call_ops_v3 ||
+	    ops == &ff_layout_commit_call_ops_v4)
+		return ff_layout_match_commit(task, task->tk_calldata, data);
+	return false;
+}
+
+static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
+{
+	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
+	struct nfs4_ff_layout_mirror *mirror;
+	struct nfs4_ff_layout_ds *mirror_ds;
+	struct nfs4_pnfs_ds *ds;
+	struct nfs_client *ds_clp;
+	struct rpc_clnt *clnt;
+	u32 idx;
+
+	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
+		mirror = flseg->mirror_array[idx];
+		mirror_ds = mirror->mirror_ds;
+		if (!mirror_ds)
+			continue;
+		ds = mirror->mirror_ds->ds;
+		if (!ds)
+			continue;
+		ds_clp = ds->ds_clp;
+		if (!ds_clp)
+			continue;
+		clnt = ds_clp->cl_rpcclient;
+		if (!clnt)
+			continue;
+		if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
+			continue;
+		rpc_clnt_disconnect(clnt);
+	}
+}
+
 static struct pnfs_ds_commit_info *
 ff_layout_get_ds_info(struct inode *inode)
 {
||||
@ -2161,8 +2244,9 @@ ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
|
||||
FF_LAYOUTRETURN_MAXERR);
|
||||
|
||||
spin_lock(&args->inode->i_lock);
|
||||
ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
|
||||
&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
|
||||
ff_args->num_dev = ff_layout_mirror_prepare_stats(
|
||||
&ff_layout->generic_hdr, &ff_args->devinfo[0],
|
||||
ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
|
||||
spin_unlock(&args->inode->i_lock);
|
||||
|
||||
args->ld_private->ops = &layoutreturn_ops;
|
||||
@@ -2396,7 +2480,7 @@ static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
 static int
 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
 			       struct nfs42_layoutstat_devinfo *devinfo,
-			       int dev_limit)
+			       int dev_limit, enum nfs4_ff_op_type type)
 {
 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 	struct nfs4_ff_layout_mirror *mirror;
@@ -2408,7 +2492,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
 			break;
 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
 			continue;
-		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
+		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
+					&mirror->flags) &&
+		    type != NFS4_FF_OP_LAYOUTRETURN)
 			continue;
 		/* mirror refcount put in cleanup_layoutstats */
 		if (!refcount_inc_not_zero(&mirror->ref))
@@ -2448,7 +2534,9 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
 	spin_lock(&args->inode->i_lock);
 	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
 	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
-			&args->devinfo[0], dev_count);
+						       &args->devinfo[0],
+						       dev_count,
+						       NFS4_FF_OP_LAYOUTSTATS);
 	spin_unlock(&args->inode->i_lock);
 	if (!args->num_dev) {
 		kfree(args->devinfo);
@@ -2501,6 +2589,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
 	.prepare_layoutreturn	= ff_layout_prepare_layoutreturn,
 	.sync			= pnfs_nfs_generic_sync,
 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
+	.cancel_io		= ff_layout_cancel_io,
 };
 
 static int __init nfs4flexfilelayout_init(void)
@@ -313,7 +313,7 @@ struct nfs_find_desc {
 static int
 nfs_find_actor(struct inode *inode, void *opaque)
 {
-	struct nfs_find_desc	*desc = (struct nfs_find_desc *)opaque;
+	struct nfs_find_desc	*desc = opaque;
 	struct nfs_fh		*fh = desc->fh;
 	struct nfs_fattr	*fattr = desc->fattr;
 
@@ -331,7 +331,7 @@ nfs_find_actor(struct inode *inode, void *opaque)
 static int
 nfs_init_locked(struct inode *inode, void *opaque)
 {
-	struct nfs_find_desc	*desc = (struct nfs_find_desc *)opaque;
+	struct nfs_find_desc	*desc = opaque;
 	struct nfs_fattr	*fattr = desc->fattr;
 
 	set_nfs_fileid(inode, fattr->fileid);
@@ -2267,7 +2267,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
 
 static void init_once(void *foo)
 {
-	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
+	struct nfs_inode *nfsi = foo;
 
 	inode_init_once(&nfsi->vfs_inode);
 	INIT_LIST_HEAD(&nfsi->open_files);
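Note: the casts dropped in these hunks are simply redundant — in C a void * converts implicitly to any object pointer type, so the kernel style is plain assignment. A minimal standalone illustration (the nfs_demo names are made up for this note, not part of the patch):

	/* In C, assignment from void * needs no cast; the conversion is
	 * implicit. (C++ does require the cast, which is one reason it
	 * keeps reappearing in C code.)
	 */
	struct nfs_demo { int id; };

	static int nfs_demo_actor(void *opaque)
	{
		struct nfs_demo *d = opaque;	/* preferred kernel style */

		return d->id;
	}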
@@ -435,7 +435,6 @@ extern void nfs_zap_acl_cache(struct inode *inode);
 extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags);
 extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
 extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
-extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
 
 /* super.c */
 extern const struct super_operations nfs_sops;
@@ -503,7 +502,6 @@ extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
 			       const struct nfs_pgio_completion_ops *compl_ops);
 extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
 extern void nfs_commit_free(struct nfs_commit_data *p);
-extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
 extern int nfs_initiate_commit(struct rpc_clnt *clnt,
 			       struct nfs_commit_data *data,
@@ -1175,6 +1175,7 @@ static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
 
 	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
 			     &res.seq_res, 1);
+	trace_nfs4_removexattr(inode, name, ret);
 	if (!ret)
 		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
 
@@ -1214,6 +1215,7 @@ static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
 
 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
 			     &res.seq_res, 1);
+	trace_nfs4_setxattr(inode, name, ret);
 
 	for (; np > 0; np--)
 		put_page(pages[np - 1]);
@@ -1246,6 +1248,7 @@ static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
 
 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
 			     &res.seq_res, 0);
+	trace_nfs4_getxattr(inode, name, ret);
 	if (ret < 0)
 		return ret;
 
@@ -1317,6 +1320,7 @@ static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
 
 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
 			     &res.seq_res, 0);
+	trace_nfs4_listxattr(inode, ret);
 
 	if (ret >= 0) {
 		ret = res.copied;
@@ -981,7 +981,7 @@ nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
 
 static void nfs4_xattr_cache_init_once(void *p)
 {
-	struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;
+	struct nfs4_xattr_cache *cache = p;
 
 	spin_lock_init(&cache->listxattr_lock);
 	atomic_long_set(&cache->nent, 0);
@@ -569,6 +569,14 @@ static int decode_listxattrs(struct xdr_stream *xdr,
 		 */
 		if (status == -ETOOSMALL)
 			status = -ERANGE;
+		/*
+		 * Special case: for LISTXATTRS, NFS4ERR_NOXATTR
+		 * should be translated to success with zero-length reply.
+		 */
+		if (status == -ENODATA) {
+			res->eof = true;
+			status = 0;
+		}
 		goto out;
 	}
 
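Note: with the decode change above, a server reply of NFS4ERR_NOXATTR (seen as -ENODATA inside the client) becomes a successful zero-length LISTXATTRS result with eof set. The userspace-visible effect, sketched below under the assumption of an NFSv4.2 mount at a placeholder path, is that listxattr(2) on a file with no xattrs returns 0 instead of failing:

	/* Hedged userspace sketch of the behavior change; the path is a
	 * placeholder for a file on an NFSv4.2 mount.
	 */
	#include <stdio.h>
	#include <sys/xattr.h>

	int main(void)
	{
		char names[1024];
		ssize_t len = listxattr("/mnt/nfs/file", names, sizeof(names));

		if (len < 0) {
			perror("listxattr");	/* previously could surface ENODATA */
			return 1;
		}
		printf("%zd bytes of xattr names\n", len);	/* 0 when none exist */
		return 0;
	}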
@@ -459,7 +459,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *);
 
 /* nfs4renewd.c */
 extern void nfs4_schedule_state_renewal(struct nfs_client *);
-extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
 extern void nfs4_renew_state(struct work_struct *);
 extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
@@ -254,7 +254,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
 			goto error;
 		ip_addr = (const char *)buf;
 	}
-	strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+	strscpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
 
 	err = nfs_idmap_new(clp);
 	if (err < 0) {
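Note: for context on this and the other strlcpy conversions below (nfsroot.c, xprtsock.c): strscpy() always NUL-terminates and returns the number of characters copied or -E2BIG on truncation, while strlcpy() returns strlen(src) and therefore must read the entire source string even when it overruns the destination. A hedged kernel-style sketch of the checking pattern (demo_set_name and the buffer size are illustrative):

	static void demo_set_name(const char *src)
	{
		char dst[16];
		ssize_t n = strscpy(dst, src, sizeof(dst));

		if (n < 0)	/* -E2BIG: truncated, but dst is still NUL-terminated */
			pr_warn("name truncated\n");
	}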
@@ -583,7 +583,7 @@ static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
 	struct request_key_auth *rka = get_request_key_auth(authkey);
 	struct rpc_pipe_msg *msg;
 	struct idmap_msg *im;
-	struct idmap *idmap = (struct idmap *)aux;
+	struct idmap *idmap = aux;
 	struct key *key = rka->target_key;
 	int ret = -ENOKEY;
 
@@ -6608,7 +6608,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 	struct nfs4_delegreturndata *d_data;
 	struct pnfs_layout_hdr *lo;
 
-	d_data = (struct nfs4_delegreturndata *)data;
+	d_data = data;
 
 	if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
 		nfs4_sequence_done(task, &d_data->res.seq_res);
@@ -8900,7 +8900,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
 			    void *data)
 {
-	struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
+	struct nfs4_add_xprt_data *adata = data;
 	struct rpc_task *task;
 	int status;
 
@@ -497,8 +497,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
 	sp = kzalloc(sizeof(*sp), gfp_flags);
 	if (!sp)
 		return NULL;
-	sp->so_seqid.owner_id = ida_simple_get(&server->openowner_id, 0, 0,
-						gfp_flags);
+	sp->so_seqid.owner_id = ida_alloc(&server->openowner_id, gfp_flags);
 	if (sp->so_seqid.owner_id < 0) {
 		kfree(sp);
 		return NULL;
@@ -534,7 +533,7 @@ static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
 {
 	nfs4_destroy_seqid_counter(&sp->so_seqid);
 	put_cred(sp->so_cred);
-	ida_simple_remove(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
+	ida_free(&sp->so_server->openowner_id, sp->so_seqid.owner_id);
 	kfree(sp);
 }
 
@@ -877,8 +876,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	refcount_set(&lsp->ls_count, 1);
 	lsp->ls_state = state;
 	lsp->ls_owner = fl_owner;
-	lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id,
-						0, 0, GFP_KERNEL_ACCOUNT);
+	lsp->ls_seqid.owner_id = ida_alloc(&server->lockowner_id, GFP_KERNEL_ACCOUNT);
 	if (lsp->ls_seqid.owner_id < 0)
 		goto out_free;
 	INIT_LIST_HEAD(&lsp->ls_locks);
@@ -890,7 +888,7 @@ out_free:
 
 void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
 {
-	ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
+	ida_free(&server->lockowner_id, lsp->ls_seqid.owner_id);
 	nfs4_destroy_seqid_counter(&lsp->ls_seqid);
 	kfree(lsp);
 }
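Note: the ida conversions in this file and in clnt.c, xprt.c and xprtmultipath.c below are mechanical: ida_simple_get(ida, 0, 0, gfp) requests any free ID starting from 0, which is exactly ida_alloc(ida, gfp), and ida_simple_remove() is ida_free(). A minimal sketch of the pattern (demo_* names are illustrative):

	#include <linux/idr.h>

	static DEFINE_IDA(demo_ida);

	static int demo_get_id(gfp_t gfp)
	{
		/* was: ida_simple_get(&demo_ida, 0, 0, gfp); */
		return ida_alloc(&demo_ida, gfp);	/* any free ID >= 0, or -errno */
	}

	static void demo_put_id(int id)
	{
		/* was: ida_simple_remove(&demo_ida, id); */
		ida_free(&demo_ida, id);
	}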
@@ -2097,6 +2097,7 @@ TRACE_EVENT(ff_layout_commit_error,
 	)
 );
 
+#ifdef CONFIG_NFS_V4_2
 TRACE_DEFINE_ENUM(NFS4_CONTENT_DATA);
 TRACE_DEFINE_ENUM(NFS4_CONTENT_HOLE);
 
@@ -2105,7 +2106,6 @@ TRACE_DEFINE_ENUM(NFS4_CONTENT_HOLE);
 		{ NFS4_CONTENT_DATA, "DATA" }, \
 		{ NFS4_CONTENT_HOLE, "HOLE" })
 
-#ifdef CONFIG_NFS_V4_2
 TRACE_EVENT(nfs4_llseek,
 		TP_PROTO(
 			const struct inode *inode,
@@ -2496,6 +2496,54 @@ TRACE_EVENT(nfs4_offload_cancel,
 			__entry->stateid_seq, __entry->stateid_hash
 		)
 );
+
+DECLARE_EVENT_CLASS(nfs4_xattr_event,
+		TP_PROTO(
+			const struct inode *inode,
+			const char *name,
+			int error
+		),
+
+		TP_ARGS(inode, name, error),
+
+		TP_STRUCT__entry(
+			__field(unsigned long, error)
+			__field(dev_t, dev)
+			__field(u32, fhandle)
+			__field(u64, fileid)
+			__string(name, name)
+		),
+
+		TP_fast_assign(
+			__entry->error = error < 0 ? -error : 0;
+			__entry->dev = inode->i_sb->s_dev;
+			__entry->fileid = NFS_FILEID(inode);
+			__entry->fhandle = nfs_fhandle_hash(NFS_FH(inode));
+			__assign_str(name, name);
+		),
+
+		TP_printk(
+			"error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x "
+			"name=%s",
+			-__entry->error, show_nfs4_status(__entry->error),
+			MAJOR(__entry->dev), MINOR(__entry->dev),
+			(unsigned long long)__entry->fileid,
+			__entry->fhandle, __get_str(name)
+		)
+);
+#define DEFINE_NFS4_XATTR_EVENT(name) \
+	DEFINE_EVENT(nfs4_xattr_event, name, \
+			TP_PROTO( \
+				const struct inode *inode, \
+				const char *name, \
+				int error \
+			), \
+			TP_ARGS(inode, name, error))
+DEFINE_NFS4_XATTR_EVENT(nfs4_getxattr);
+DEFINE_NFS4_XATTR_EVENT(nfs4_setxattr);
+DEFINE_NFS4_XATTR_EVENT(nfs4_removexattr);
+
+DEFINE_NFS4_INODE_EVENT(nfs4_listxattr);
 #endif /* CONFIG_NFS_V4_2 */
 
 #endif /* CONFIG_NFS_V4_1 */
@@ -139,7 +139,7 @@ static int __init nfs_root_setup(char *line)
 	ROOT_DEV = Root_NFS;
 
 	if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
-		strlcpy(nfs_root_parms, line, sizeof(nfs_root_parms));
+		strscpy(nfs_root_parms, line, sizeof(nfs_root_parms));
 	} else {
 		size_t n = strlen(line) + sizeof(NFS_ROOT) - 1;
 		if (n >= sizeof(nfs_root_parms))
@@ -710,6 +710,7 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
 		u32 seq)
 {
 	struct pnfs_layout_segment *lseg, *next;
+	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
 	int remaining = 0;
 
 	dprintk("%s:Begin lo %p\n", __func__, lo);
@@ -722,8 +723,10 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
 			"offset %llu length %llu\n", __func__,
 			lseg, lseg->pls_range.iomode, lseg->pls_seq,
 			lseg->pls_range.offset, lseg->pls_range.length);
-		if (!mark_lseg_invalid(lseg, tmp_list))
-			remaining++;
+		if (mark_lseg_invalid(lseg, tmp_list))
+			continue;
+		remaining++;
+		pnfs_lseg_cancel_io(server, lseg);
 	}
 	dprintk("%s:Return %i\n", __func__, remaining);
 	return remaining;
@@ -2485,6 +2488,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
 		u32 seq)
 {
 	struct pnfs_layout_segment *lseg, *next;
+	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
 	int remaining = 0;
 
 	dprintk("%s:Begin lo %p\n", __func__, lo);
@@ -2507,6 +2511,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
 			continue;
 		remaining++;
 		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+		pnfs_lseg_cancel_io(server, lseg);
 	}
 
 	if (remaining) {
@@ -169,6 +169,8 @@ struct pnfs_layoutdriver_type {
 	void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data);
 	int (*prepare_layoutcommit) (struct nfs4_layoutcommit_args *args);
 	int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args);
+
+	void (*cancel_io)(struct pnfs_layout_segment *lseg);
 };
 
 struct pnfs_commit_ops {
@@ -685,6 +687,13 @@ pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page
 			 req_offset(req), req_last);
 }
 
+static inline void pnfs_lseg_cancel_io(struct nfs_server *server,
+				       struct pnfs_layout_segment *lseg)
+{
+	if (server->pnfs_curr_ld->cancel_io)
+		server->pnfs_curr_ld->cancel_io(lseg);
+}
+
 extern unsigned int layoutstats_timer;
 
 #ifdef NFS_DEBUG
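Note: taken together, the pnfs.c and pnfs.h hunks give layout drivers an optional ->cancel_io() hook: whenever a segment is invalidated or marked for layoutreturn, pnfs_lseg_cancel_io() forwards it to the driver. Flexfiles (earlier in this series) is the only in-tree user; the hedged sketch below shows the shape of a driver-side implementation, with hypothetical demo_* names standing in for driver specifics:

	/* Hypothetical layout driver wiring for the new hook;
	 * rpc_cancel_tasks() and rpc_clnt_disconnect() are the
	 * interfaces added by this series.
	 */
	static bool demo_match_lseg(const struct rpc_task *task, const void *data)
	{
		/* driver-specific: does @task belong to the lseg in @data? */
		return task->tk_calldata == data;
	}

	static void demo_cancel_io(struct pnfs_layout_segment *lseg)
	{
		struct rpc_clnt *clnt = demo_lseg_to_rpc_clnt(lseg);	/* hypothetical */

		if (rpc_cancel_tasks(clnt, -EAGAIN, demo_match_lseg, lseg))
			rpc_clnt_disconnect(clnt);	/* abort tasks already on the wire */
	}

	static struct pnfs_layoutdriver_type demo_layout_type = {
		/* ... */
		.cancel_io	= demo_cancel_io,
	};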
@@ -374,12 +374,12 @@ pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
 	return NULL;
 }
 
-/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head reqest
+/* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head request
  * for @page
  * @cinfo - commit info for current inode
  * @page - page to search for matching head request
  *
- * Returns a the head request if one is found, otherwise returns NULL.
+ * Return: the head request if one is found, otherwise %NULL.
  */
 struct nfs_page *
 pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
@@ -246,6 +246,7 @@ void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
 			const struct sockaddr *sap);
 void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt);
+void rpc_clnt_disconnect(struct rpc_clnt *clnt);
 void rpc_cleanup_clids(void);
 
 static inline int rpc_reply_expected(struct rpc_task *task)
@@ -209,11 +209,17 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
 void		rpc_put_task(struct rpc_task *);
 void		rpc_put_task_async(struct rpc_task *);
+bool		rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
+void		rpc_task_try_cancel(struct rpc_task *task, int error);
 void		rpc_signal_task(struct rpc_task *);
 void		rpc_exit_task(struct rpc_task *);
 void		rpc_exit(struct rpc_task *, int);
 void		rpc_release_calldata(const struct rpc_call_ops *, void *);
 void		rpc_killall_tasks(struct rpc_clnt *);
+unsigned long	rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+				 bool (*fnmatch)(const struct rpc_task *,
+						 const void *),
+				 const void *data);
 void		rpc_execute(struct rpc_task *);
 void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
 void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
@@ -345,7 +345,7 @@ static int rpc_alloc_clid(struct rpc_clnt *clnt)
 {
 	int clid;
 
-	clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
+	clid = ida_alloc(&rpc_clids, GFP_KERNEL);
 	if (clid < 0)
 		return clid;
 	clnt->cl_clid = clid;
@@ -354,7 +354,7 @@ static int rpc_alloc_clid(struct rpc_clnt *clnt)
 
 static void rpc_free_clid(struct rpc_clnt *clnt)
 {
-	ida_simple_remove(&rpc_clids, clnt->cl_clid);
+	ida_free(&rpc_clids, clnt->cl_clid);
 }
 
 static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
@@ -873,6 +873,57 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 }
 EXPORT_SYMBOL_GPL(rpc_killall_tasks);
 
+/**
+ * rpc_cancel_tasks - try to cancel a set of RPC tasks
+ * @clnt: Pointer to RPC client
+ * @error: RPC task error value to set
+ * @fnmatch: Pointer to selector function
+ * @data: User data
+ *
+ * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
+ * The argument @error must be a negative error value.
+ */
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+			       bool (*fnmatch)(const struct rpc_task *,
+					       const void *),
+			       const void *data)
+{
+	struct rpc_task *task;
+	unsigned long count = 0;
+
+	if (list_empty(&clnt->cl_tasks))
+		return 0;
+	/*
+	 * Spin lock all_tasks to prevent changes...
+	 */
+	spin_lock(&clnt->cl_lock);
+	list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
+		if (!RPC_IS_ACTIVATED(task))
+			continue;
+		if (!fnmatch(task, data))
+			continue;
+		rpc_task_try_cancel(task, error);
+		count++;
+	}
+	spin_unlock(&clnt->cl_lock);
+	return count;
+}
+EXPORT_SYMBOL_GPL(rpc_cancel_tasks);
+
+static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt,
+				    struct rpc_xprt *xprt, void *dummy)
+{
+	if (xprt_connected(xprt))
+		xprt_force_disconnect(xprt);
+	return 0;
+}
+
+void rpc_clnt_disconnect(struct rpc_clnt *clnt)
+{
+	rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL);
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_disconnect);
+
 /*
  * Properly shut down an RPC client, terminating all outstanding
  * requests.
@@ -1642,7 +1693,7 @@ static void
 __rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
 {
 	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
-	task->tk_rpc_status = rpc_status;
+	rpc_task_set_rpc_status(task, rpc_status);
 	rpc_exit(task, tk_status);
 }
 
@@ -2435,10 +2486,8 @@ rpc_check_timeout(struct rpc_task *task)
 {
 	struct rpc_clnt *clnt = task->tk_client;
 
-	if (RPC_SIGNALLED(task)) {
-		rpc_call_rpcerror(task, -ERESTARTSYS);
+	if (RPC_SIGNALLED(task))
 		return;
-	}
 
 	if (xprt_adjust_timeout(task->tk_rqstp) == 0)
 		return;
@@ -65,6 +65,13 @@ gfp_t rpc_task_gfp_mask(void)
 }
 EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
 
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
+{
+	if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
+		return true;
+	return false;
+}
+
 unsigned long
 rpc_task_timeout(const struct rpc_task *task)
 {
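Note: the cmpxchg() above gives "first canceller wins" semantics — tk_rpc_status moves from 0 to a negative errno exactly once, so rpc_signal_task(), rpc_task_try_cancel() and __rpc_call_rpcerror() cannot overwrite each other's exit status. A small illustration, assuming a task whose status has not been set yet (demo_cancel_twice is hypothetical):

	static void demo_cancel_twice(struct rpc_task *task)
	{
		bool first = rpc_task_set_rpc_status(task, -EAGAIN);	   /* true */
		bool again = rpc_task_set_rpc_status(task, -ERESTARTSYS); /* false */

		WARN_ON(!first || again);	/* tk_rpc_status stays -EAGAIN */
	}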
@@ -853,12 +860,25 @@ void rpc_signal_task(struct rpc_task *task)
 	if (!RPC_IS_ACTIVATED(task))
 		return;
 
+	if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
+		return;
 	trace_rpc_task_signalled(task, task->tk_action);
 	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 	smp_mb__after_atomic();
 	queue = READ_ONCE(task->tk_waitqueue);
 	if (queue)
-		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
+		rpc_wake_up_queued_task(queue, task);
+}
+
+void rpc_task_try_cancel(struct rpc_task *task, int error)
+{
+	struct rpc_wait_queue *queue;
+
+	if (!rpc_task_set_rpc_status(task, error))
+		return;
+	queue = READ_ONCE(task->tk_waitqueue);
+	if (queue)
+		rpc_wake_up_queued_task(queue, task);
 }
 
 void rpc_exit(struct rpc_task *task, int status)
@@ -905,10 +925,16 @@ static void __rpc_execute(struct rpc_task *task)
 		 * Perform the next FSM step or a pending callback.
 		 *
 		 * tk_action may be NULL if the task has been killed.
-		 * In particular, note that rpc_killall_tasks may
-		 * do this at any time, so beware when dereferencing.
 		 */
 		do_action = task->tk_action;
+		/* Tasks with an RPC error status should exit */
+		if (do_action != rpc_exit_task &&
+		    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
+			task->tk_status = status;
+			if (do_action != NULL)
+				do_action = rpc_exit_task;
+		}
 		/* Callbacks override all actions */
 		if (task->tk_callback) {
 			do_action = task->tk_callback;
 			task->tk_callback = NULL;
@@ -930,14 +956,6 @@ static void __rpc_execute(struct rpc_task *task)
 			continue;
 		}
 
-		/*
-		 * Signalled tasks should exit rather than sleep.
-		 */
-		if (RPC_SIGNALLED(task)) {
-			task->tk_rpc_status = -ERESTARTSYS;
-			rpc_exit(task, -ERESTARTSYS);
-		}
-
 		/*
 		 * The queue->lock protects against races with
 		 * rpc_make_runnable().
@@ -953,6 +971,12 @@ static void __rpc_execute(struct rpc_task *task)
 			spin_unlock(&queue->lock);
 			continue;
 		}
+		/* Wake up any task that has an exit status */
+		if (READ_ONCE(task->tk_rpc_status) != 0) {
+			rpc_wake_up_task_queue_locked(queue, task);
+			spin_unlock(&queue->lock);
+			continue;
+		}
 		rpc_clear_running(task);
 		spin_unlock(&queue->lock);
 		if (task_is_async)
@@ -970,10 +994,7 @@ static void __rpc_execute(struct rpc_task *task)
 		 * clean up after sleeping on some queue, we don't
 		 * break the loop here, but go around once more.
 		 */
-		trace_rpc_task_signalled(task, task->tk_action);
-		set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
-		task->tk_rpc_status = -ERESTARTSYS;
-		rpc_exit(task, -ERESTARTSYS);
+		rpc_signal_task(task);
 	}
 	trace_rpc_task_sync_wake(task, task->tk_action);
 }
@@ -1788,7 +1788,7 @@ static int xprt_alloc_id(struct rpc_xprt *xprt)
 {
 	int id;
 
-	id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
+	id = ida_alloc(&rpc_xprt_ids, GFP_KERNEL);
 	if (id < 0)
 		return id;
 
@@ -1798,7 +1798,7 @@ static int xprt_alloc_id(struct rpc_xprt *xprt)
 
 static void xprt_free_id(struct rpc_xprt *xprt)
 {
-	ida_simple_remove(&rpc_xprt_ids, xprt->id);
+	ida_free(&rpc_xprt_ids, xprt->id);
 }
 
 struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
@@ -1822,10 +1822,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
 			goto out_free;
 		list_add(&req->rq_list, &xprt->free);
 	}
-	if (max_alloc > num_prealloc)
-		xprt->max_reqs = max_alloc;
-	else
-		xprt->max_reqs = num_prealloc;
+	xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc);
 	xprt->min_reqs = num_prealloc;
 	xprt->num_reqs = num_prealloc;
 
@@ -103,7 +103,7 @@ static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags)
 {
 	int id;
 
-	id = ida_simple_get(&rpc_xprtswitch_ids, 0, 0, gfp_flags);
+	id = ida_alloc(&rpc_xprtswitch_ids, gfp_flags);
 	if (id < 0)
 		return id;
 
@@ -113,7 +113,7 @@ static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags)
 
 static void xprt_switch_free_id(struct rpc_xprt_switch *xps)
 {
-	ida_simple_remove(&rpc_xprtswitch_ids, xps->xps_id);
+	ida_free(&rpc_xprtswitch_ids, xps->xps_id);
 }
 
 /**
@@ -189,7 +189,7 @@ create_req:
 		return NULL;
 
 	size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
-	req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
+	req = rpcrdma_req_create(r_xprt, size);
 	if (!req)
 		return NULL;
 	if (rpcrdma_req_setup(r_xprt, req)) {
@@ -124,16 +124,16 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 	unsigned int depth = ep->re_max_fr_depth;
 	struct scatterlist *sg;
 	struct ib_mr *frmr;
-	int rc;
+
+	sg = kcalloc_node(depth, sizeof(*sg), XPRTRDMA_GFP_FLAGS,
+			  ibdev_to_node(ep->re_id->device));
+	if (!sg)
+		return -ENOMEM;
 
 	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
 	if (IS_ERR(frmr))
 		goto out_mr_err;
 
-	sg = kmalloc_array(depth, sizeof(*sg), GFP_KERNEL);
-	if (!sg)
-		goto out_list_err;
-
 	mr->mr_xprt = r_xprt;
 	mr->mr_ibmr = frmr;
 	mr->mr_device = NULL;
@@ -146,13 +146,9 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 	return 0;
 
 out_mr_err:
-	rc = PTR_ERR(frmr);
-	trace_xprtrdma_frwr_alloc(mr, rc);
-	return rc;
-
-out_list_err:
-	ib_dereg_mr(frmr);
-	return -ENOMEM;
+	kfree(sg);
+	trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr));
+	return PTR_ERR(frmr);
 }
 
 /**
@@ -119,12 +119,12 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
 		return -EINVAL;
 	}
 
-	page = alloc_page(RPCRDMA_DEF_GFP);
+	page = alloc_page(GFP_NOIO | __GFP_NOWARN);
 	if (!page)
 		return -ENOMEM;
 	rqst->rq_buffer = page_address(page);
 
-	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, GFP_NOIO | __GFP_NOWARN);
 	if (!rqst->rq_rbuffer) {
 		put_page(page);
 		return -ENOMEM;
@@ -494,8 +494,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 		xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
 	}
 	trace_xprtrdma_op_connect(r_xprt, delay);
-	queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
-			   delay);
+	queue_delayed_work(system_long_wq, &r_xprt->rx_connect_worker, delay);
 }
 
 /**
@@ -76,8 +76,7 @@ static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
 static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
 static struct rpcrdma_regbuf *
-rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
-		     gfp_t flags);
+rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction);
 static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
 static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
 
@@ -373,7 +372,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_ep *ep;
 	int rc;
 
-	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	ep = kzalloc(sizeof(*ep), XPRTRDMA_GFP_FLAGS);
 	if (!ep)
 		return -ENOTCONN;
 	ep->re_xprt = &r_xprt->rx_xprt;
@@ -606,7 +605,7 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
 	struct rpcrdma_sendctx *sc;
 
 	sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
-		     GFP_KERNEL);
+		     XPRTRDMA_GFP_FLAGS);
 	if (!sc)
 		return NULL;
 
@@ -629,7 +628,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
 	 * Sends are posted.
 	 */
 	i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
-	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
+	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), XPRTRDMA_GFP_FLAGS);
 	if (!buf->rb_sc_ctxs)
 		return -ENOMEM;
 
@@ -740,13 +739,16 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_ep *ep = r_xprt->rx_ep;
+	struct ib_device *device = ep->re_id->device;
 	unsigned int count;
 
+	/* Try to allocate enough to perform one full-sized I/O */
 	for (count = 0; count < ep->re_max_rdma_segs; count++) {
 		struct rpcrdma_mr *mr;
 		int rc;
 
-		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+		mr = kzalloc_node(sizeof(*mr), XPRTRDMA_GFP_FLAGS,
+				  ibdev_to_node(device));
 		if (!mr)
 			break;
 
@@ -791,38 +793,33 @@ void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
 	/* If there is no underlying connection, it's no use
 	 * to wake the refresh worker.
 	 */
-	if (ep->re_connect_status == 1) {
-		/* The work is scheduled on a WQ_MEM_RECLAIM
-		 * workqueue in order to prevent MR allocation
-		 * from recursing into NFS during direct reclaim.
-		 */
-		queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
-	}
+	if (ep->re_connect_status != 1)
+		return;
+	queue_work(system_highpri_wq, &buf->rb_refresh_worker);
 }
 
 /**
  * rpcrdma_req_create - Allocate an rpcrdma_req object
  * @r_xprt: controlling r_xprt
  * @size: initial size, in bytes, of send and receive buffers
- * @flags: GFP flags passed to memory allocators
  *
  * Returns an allocated and fully initialized rpcrdma_req or NULL.
  */
-struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
-				       gfp_t flags)
+struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
+				       size_t size)
 {
 	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
 	struct rpcrdma_req *req;
 
-	req = kzalloc(sizeof(*req), flags);
+	req = kzalloc(sizeof(*req), XPRTRDMA_GFP_FLAGS);
 	if (req == NULL)
 		goto out1;
 
-	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
+	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE);
 	if (!req->rl_sendbuf)
 		goto out2;
 
-	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
+	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE);
 	if (!req->rl_recvbuf)
 		goto out3;
 
@@ -858,7 +855,7 @@ int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		     r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
 	maxhdrsize *= sizeof(__be32);
 	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
-				  DMA_TO_DEVICE, GFP_KERNEL);
+				  DMA_TO_DEVICE);
 	if (!rb)
 		goto out;
 
@@ -929,12 +926,12 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_rep *rep;
 
-	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	rep = kzalloc(sizeof(*rep), XPRTRDMA_GFP_FLAGS);
 	if (rep == NULL)
 		goto out;
 
 	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
-					       DMA_FROM_DEVICE, GFP_KERNEL);
+					       DMA_FROM_DEVICE);
 	if (!rep->rr_rdmabuf)
 		goto out_free;
 
@@ -1064,8 +1061,8 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
 		struct rpcrdma_req *req;
 
-		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
-					 GFP_KERNEL);
+		req = rpcrdma_req_create(r_xprt,
+					 RPCRDMA_V1_DEF_INLINE_SIZE * 2);
 		if (!req)
 			goto out;
 		list_add(&req->rl_list, &buf->rb_send_bufs);
@@ -1235,15 +1232,14 @@ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
  * or Replies they may be registered externally via frwr_map.
  */
 static struct rpcrdma_regbuf *
-rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
-		     gfp_t flags)
+rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction)
 {
 	struct rpcrdma_regbuf *rb;
 
-	rb = kmalloc(sizeof(*rb), flags);
+	rb = kmalloc(sizeof(*rb), XPRTRDMA_GFP_FLAGS);
 	if (!rb)
 		return NULL;
-	rb->rg_data = kmalloc(size, flags);
+	rb->rg_data = kmalloc(size, XPRTRDMA_GFP_FLAGS);
 	if (!rb->rg_data) {
 		kfree(rb);
 		return NULL;
@@ -149,7 +149,11 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
 	return rb->rg_data;
 }
 
-#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)
+/* Do not use emergency memory reserves, and fail quickly if memory
+ * cannot be allocated easily. These flags may be used wherever there
+ * is robust logic to handle a failure to allocate.
+ */
+#define XPRTRDMA_GFP_FLAGS (__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
 
 /* To ensure a transport can always make forward progress,
  * the number of RDMA segments allowed in header chunk lists
@@ -467,8 +471,8 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
 /*
  * Buffer calls - xprtrdma/verbs.c
  */
-struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
-				       gfp_t flags);
+struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
+				       size_t size);
 int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
 void rpcrdma_req_destroy(struct rpcrdma_req *req);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
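Note: each flag in the new mask relaxes a different allocator behavior — __GFP_NOMEMALLOC keeps the request away from the emergency reserves, __GFP_NORETRY lets it fail instead of retrying hard or invoking the OOM killer, and __GFP_NOWARN suppresses the allocation-failure warning. Every converted call site already tolerates NULL, as in this hedged sketch (demo_thing is illustrative):

	struct demo_thing { int dummy; };

	static int demo_alloc_one(struct demo_thing **out)
	{
		struct demo_thing *thing;

		thing = kzalloc(sizeof(*thing), XPRTRDMA_GFP_FLAGS);
		if (!thing)
			return -ENOMEM;	/* fail fast; the caller is built to retry later */
		*out = thing;
		return 0;
	}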
@@ -261,7 +261,7 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
 	switch (sap->sa_family) {
 	case AF_LOCAL:
 		sun = xs_addr_un(xprt);
-		strlcpy(buf, sun->sun_path, sizeof(buf));
+		strscpy(buf, sun->sun_path, sizeof(buf));
 		xprt->address_strings[RPC_DISPLAY_ADDR] =
 						kstrdup(buf, GFP_KERNEL);
 		break;
@@ -1978,8 +1978,7 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 		 * we'll need to figure out how to pass a namespace to
 		 * connect.
 		 */
-		task->tk_rpc_status = -ENOTCONN;
-		rpc_exit(task, -ENOTCONN);
+		rpc_task_set_rpc_status(task, -ENOTCONN);
 		goto out_wake;
 	}
 	ret = xs_local_setup_socket(transport);