NFS client updates for Linux 4.20

Merge tag 'nfs-for-4.20-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:

 "Highlights include:

  Stable fixes:
   - Fix the NFSv4.1 r/wsize sanity checking
   - Reset the RPC/RDMA credit grant properly after a disconnect
   - Fix a missed page unlock after pg_doio()

  Features and optimisations:
   - Overhaul of the RPC client socket code to eliminate a locking
     bottleneck and reduce the latency when transmitting lots of
     requests in parallel.
   - Allow parallelisation of the RPCSEC_GSS encoding of an RPC request.
   - Convert the RPC client socket receive code to use iovec_iter() for
     improved efficiency.
   - Convert several NFS and RPC lookup operations to use RCU instead of
     taking global locks.
   - Avoid the need for BH-safe locks in the RPC/RDMA back channel.

  Bugfixes and cleanups:
   - Fix lock recovery during NFSv4 delegation recalls
   - Fix the NFSv4 + NFSv4.1 "lookup revalidate + open file" case
   - Fixes for the RPC connection metrics
   - Various RPC client layer cleanups to consolidate stream-based sockets
   - RPC/RDMA connection cleanups
   - Simplify the RPC/RDMA cleanup after memory operation failures
   - Clean-ups for NFSv4.2 copy completion and NFSv4 open state reclaim"

* tag 'nfs-for-4.20-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (97 commits)
  SUNRPC: Convert the auth cred cache to use refcount_t
  SUNRPC: Convert auth creds to use refcount_t
  SUNRPC: Simplify lookup code
  SUNRPC: Clean up the AUTH cache code
  NFS: change sign of nfs_fh length
  sunrpc: safely reallow resvport min/max inversion
  nfs: remove redundant call to nfs_context_set_write_error()
  nfs: Fix a missed page unlock after pg_doio()
  SUNRPC: Fix a compile warning for cmpxchg64()
  NFSv4.x: fix lock recovery during delegation recall
  SUNRPC: use cmpxchg64() in gss_seq_send64_fetch_and_inc()
  xprtrdma: Squelch a sparse warning
  xprtrdma: Clean up xprt_rdma_disconnect_inject
  xprtrdma: Add documenting comments
  xprtrdma: Report when there were zero posted Receives
  xprtrdma: Move rb_flags initialization
  xprtrdma: Don't disable BH's in backchannel server
  xprtrdma: Remove memory address of "ep" from an error message
  xprtrdma: Rename rpcrdma_qp_async_error_upcall
  xprtrdma: Simplify RPC wake-ups on connect
  ...
Commit: c7a2c49ea6
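Many of the changes in this pull replace list walks that were done under a spinlock with RCU-protected walks that only pin an entry when refcount_inc_not_zero() succeeds (see the get_nfs_open_context(), __nfs_find_lock_context() and __nfs4_find_state_byowner() hunks below). The following is a minimal, self-contained sketch of that lookup/release pattern; the demo_* names are illustrative and are not taken from the patch set.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object kept on an RCU-protected list. */
struct demo_ctx {
	struct list_head	list;
	refcount_t		count;
	struct rcu_head		rcu_head;
	int			key;
};

/* Lookup without taking the spinlock that writers use. */
static struct demo_ctx *demo_find_get(struct list_head *head, int key)
{
	struct demo_ctx *pos;

	rcu_read_lock();
	list_for_each_entry_rcu(pos, head, list) {
		if (pos->key != key)
			continue;
		/* Skip entries whose last reference is already gone. */
		if (!refcount_inc_not_zero(&pos->count))
			continue;
		rcu_read_unlock();
		return pos;
	}
	rcu_read_unlock();
	return NULL;
}

/* Release: unlink under the writer lock, free only after a grace period. */
static void demo_put(struct demo_ctx *ctx, spinlock_t *lock)
{
	if (!refcount_dec_and_lock(&ctx->count, lock))
		return;
	list_del_rcu(&ctx->list);
	spin_unlock(lock);
	kfree_rcu(ctx, rcu_head);
}

The key points, which the hunks below apply to nfs_open_context, nfs_lock_context and nfs4_state, are that readers never take the writer's lock, that a zero refcount marks an entry as dying even though it is still visible on the list, and that the memory is only reclaimed via kfree_rcu() once all readers are done.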
fs/nfs/delegation.c

@@ -93,7 +93,7 @@ int nfs4_check_delegation(struct inode *inode, fmode_t flags)
 	return nfs4_do_check_delegation(inode, flags, false);
 }
 
-static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
 {
 	struct inode *inode = state->inode;
 	struct file_lock *fl;
@@ -108,7 +108,7 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 	spin_lock(&flctx->flc_lock);
 restart:
 	list_for_each_entry(fl, list, fl_list) {
-		if (nfs_file_open_context(fl->fl_file) != ctx)
+		if (nfs_file_open_context(fl->fl_file)->state != state)
 			continue;
 		spin_unlock(&flctx->flc_lock);
 		status = nfs4_lock_delegation_recall(fl, state, stateid);
@@ -136,8 +136,8 @@ static int nfs_delegation_claim_opens(struct inode *inode,
 	int err;
 
 again:
-	spin_lock(&inode->i_lock);
-	list_for_each_entry(ctx, &nfsi->open_files, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
 		state = ctx->state;
 		if (state == NULL)
 			continue;
@@ -147,15 +147,16 @@ again:
 			continue;
 		if (!nfs4_stateid_match(&state->stateid, stateid))
 			continue;
-		get_nfs_open_context(ctx);
-		spin_unlock(&inode->i_lock);
+		if (!get_nfs_open_context(ctx))
+			continue;
+		rcu_read_unlock();
 		sp = state->owner;
 		/* Block nfs4_proc_unlck */
 		mutex_lock(&sp->so_delegreturn_mutex);
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
 		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
 		if (!err)
-			err = nfs_delegation_claim_locks(ctx, state, stateid);
+			err = nfs_delegation_claim_locks(state, stateid);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
 			err = -EAGAIN;
 		mutex_unlock(&sp->so_delegreturn_mutex);
@@ -164,7 +165,7 @@ again:
 			return err;
 		goto again;
 	}
-	spin_unlock(&inode->i_lock);
+	rcu_read_unlock();
 	return 0;
 }
fs/nfs/dir.c (293 lines changed)
@ -1072,6 +1072,100 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
|
||||
return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
|
||||
}
|
||||
|
||||
static int
|
||||
nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
|
||||
struct inode *inode, int error)
|
||||
{
|
||||
switch (error) {
|
||||
case 1:
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
|
||||
__func__, dentry);
|
||||
return 1;
|
||||
case 0:
|
||||
nfs_mark_for_revalidate(dir);
|
||||
if (inode && S_ISDIR(inode->i_mode)) {
|
||||
/* Purge readdir caches. */
|
||||
nfs_zap_caches(inode);
|
||||
/*
|
||||
* We can't d_drop the root of a disconnected tree:
|
||||
* its d_hash is on the s_anon list and d_drop() would hide
|
||||
* it from shrink_dcache_for_unmount(), leading to busy
|
||||
* inodes on unmount and further oopses.
|
||||
*/
|
||||
if (IS_ROOT(dentry))
|
||||
return 1;
|
||||
}
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
|
||||
__func__, dentry);
|
||||
return 0;
|
||||
}
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
|
||||
__func__, dentry, error);
|
||||
return error;
|
||||
}
|
||||
|
||||
static int
|
||||
nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
|
||||
unsigned int flags)
|
||||
{
|
||||
int ret = 1;
|
||||
if (nfs_neg_need_reval(dir, dentry, flags)) {
|
||||
if (flags & LOOKUP_RCU)
|
||||
return -ECHILD;
|
||||
ret = 0;
|
||||
}
|
||||
return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
|
||||
}
|
||||
|
||||
static int
|
||||
nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
|
||||
struct inode *inode)
|
||||
{
|
||||
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
||||
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
|
||||
}
|
||||
|
||||
static int
|
||||
nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
|
||||
struct inode *inode)
|
||||
{
|
||||
struct nfs_fh *fhandle;
|
||||
struct nfs_fattr *fattr;
|
||||
struct nfs4_label *label;
|
||||
int ret;
|
||||
|
||||
ret = -ENOMEM;
|
||||
fhandle = nfs_alloc_fhandle();
|
||||
fattr = nfs_alloc_fattr();
|
||||
label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
|
||||
if (fhandle == NULL || fattr == NULL || IS_ERR(label))
|
||||
goto out;
|
||||
|
||||
ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
|
||||
if (ret < 0) {
|
||||
if (ret == -ESTALE || ret == -ENOENT)
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
ret = 0;
|
||||
if (nfs_compare_fh(NFS_FH(inode), fhandle))
|
||||
goto out;
|
||||
if (nfs_refresh_inode(inode, fattr) < 0)
|
||||
goto out;
|
||||
|
||||
nfs_setsecurity(inode, fattr, label);
|
||||
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
||||
|
||||
/* set a readdirplus hint that we had a cache miss */
|
||||
nfs_force_use_readdirplus(dir);
|
||||
ret = 1;
|
||||
out:
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
nfs4_label_free(label);
|
||||
return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called every time the dcache has a lookup hit,
|
||||
* and we should check whether we can really trust that
|
||||
@ -1083,58 +1177,36 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
|
||||
* If the parent directory is seen to have changed, we throw out the
|
||||
* cached dentry and do a new lookup.
|
||||
*/
|
||||
static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
|
||||
static int
|
||||
nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct inode *dir;
|
||||
struct inode *inode;
|
||||
struct dentry *parent;
|
||||
struct nfs_fh *fhandle = NULL;
|
||||
struct nfs_fattr *fattr = NULL;
|
||||
struct nfs4_label *label = NULL;
|
||||
int error;
|
||||
|
||||
if (flags & LOOKUP_RCU) {
|
||||
parent = READ_ONCE(dentry->d_parent);
|
||||
dir = d_inode_rcu(parent);
|
||||
if (!dir)
|
||||
return -ECHILD;
|
||||
} else {
|
||||
parent = dget_parent(dentry);
|
||||
dir = d_inode(parent);
|
||||
}
|
||||
nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
|
||||
inode = d_inode(dentry);
|
||||
|
||||
if (!inode) {
|
||||
if (nfs_neg_need_reval(dir, dentry, flags)) {
|
||||
if (flags & LOOKUP_RCU)
|
||||
return -ECHILD;
|
||||
goto out_bad;
|
||||
}
|
||||
goto out_valid;
|
||||
}
|
||||
if (!inode)
|
||||
return nfs_lookup_revalidate_negative(dir, dentry, flags);
|
||||
|
||||
if (is_bad_inode(inode)) {
|
||||
if (flags & LOOKUP_RCU)
|
||||
return -ECHILD;
|
||||
dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
|
||||
__func__, dentry);
|
||||
goto out_bad;
|
||||
}
|
||||
|
||||
if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
|
||||
goto out_set_verifier;
|
||||
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
|
||||
|
||||
/* Force a full look up iff the parent directory has changed */
|
||||
if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) &&
|
||||
nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
|
||||
error = nfs_lookup_verify_inode(inode, flags);
|
||||
if (error) {
|
||||
if (flags & LOOKUP_RCU)
|
||||
return -ECHILD;
|
||||
if (error == -ESTALE)
|
||||
goto out_zap_parent;
|
||||
goto out_error;
|
||||
nfs_zap_caches(dir);
|
||||
goto out_bad;
|
||||
}
|
||||
nfs_advise_use_readdirplus(dir);
|
||||
goto out_valid;
|
||||
@ -1146,81 +1218,45 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
|
||||
if (NFS_STALE(inode))
|
||||
goto out_bad;
|
||||
|
||||
error = -ENOMEM;
|
||||
fhandle = nfs_alloc_fhandle();
|
||||
fattr = nfs_alloc_fattr();
|
||||
if (fhandle == NULL || fattr == NULL)
|
||||
goto out_error;
|
||||
|
||||
label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
|
||||
if (IS_ERR(label))
|
||||
goto out_error;
|
||||
|
||||
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
|
||||
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
|
||||
error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
|
||||
trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
|
||||
if (error == -ESTALE || error == -ENOENT)
|
||||
goto out_bad;
|
||||
if (error)
|
||||
goto out_error;
|
||||
if (nfs_compare_fh(NFS_FH(inode), fhandle))
|
||||
goto out_bad;
|
||||
if ((error = nfs_refresh_inode(inode, fattr)) != 0)
|
||||
goto out_bad;
|
||||
return error;
|
||||
out_valid:
|
||||
return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
|
||||
out_bad:
|
||||
if (flags & LOOKUP_RCU)
|
||||
return -ECHILD;
|
||||
return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
|
||||
}
|
||||
|
||||
nfs_setsecurity(inode, fattr, label);
|
||||
static int
|
||||
__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
|
||||
int (*reval)(struct inode *, struct dentry *, unsigned int))
|
||||
{
|
||||
struct dentry *parent;
|
||||
struct inode *dir;
|
||||
int ret;
|
||||
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
nfs4_label_free(label);
|
||||
|
||||
/* set a readdirplus hint that we had a cache miss */
|
||||
nfs_force_use_readdirplus(dir);
|
||||
|
||||
out_set_verifier:
|
||||
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
||||
out_valid:
|
||||
if (flags & LOOKUP_RCU) {
|
||||
parent = READ_ONCE(dentry->d_parent);
|
||||
dir = d_inode_rcu(parent);
|
||||
if (!dir)
|
||||
return -ECHILD;
|
||||
ret = reval(dir, dentry, flags);
|
||||
if (parent != READ_ONCE(dentry->d_parent))
|
||||
return -ECHILD;
|
||||
} else
|
||||
} else {
|
||||
parent = dget_parent(dentry);
|
||||
ret = reval(d_inode(parent), dentry, flags);
|
||||
dput(parent);
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
|
||||
__func__, dentry);
|
||||
return 1;
|
||||
out_zap_parent:
|
||||
nfs_zap_caches(dir);
|
||||
out_bad:
|
||||
WARN_ON(flags & LOOKUP_RCU);
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
nfs4_label_free(label);
|
||||
nfs_mark_for_revalidate(dir);
|
||||
if (inode && S_ISDIR(inode->i_mode)) {
|
||||
/* Purge readdir caches. */
|
||||
nfs_zap_caches(inode);
|
||||
/*
|
||||
* We can't d_drop the root of a disconnected tree:
|
||||
* its d_hash is on the s_anon list and d_drop() would hide
|
||||
* it from shrink_dcache_for_unmount(), leading to busy
|
||||
* inodes on unmount and further oopses.
|
||||
*/
|
||||
if (IS_ROOT(dentry))
|
||||
goto out_valid;
|
||||
}
|
||||
dput(parent);
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
|
||||
__func__, dentry);
|
||||
return 0;
|
||||
out_error:
|
||||
WARN_ON(flags & LOOKUP_RCU);
|
||||
nfs_free_fattr(fattr);
|
||||
nfs_free_fhandle(fhandle);
|
||||
nfs4_label_free(label);
|
||||
dput(parent);
|
||||
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
|
||||
__func__, dentry, error);
|
||||
return error;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
|
||||
{
|
||||
return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1579,62 +1615,55 @@ no_open:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nfs_atomic_open);
|
||||
|
||||
static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
|
||||
static int
|
||||
nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
|
||||
unsigned int flags)
|
||||
{
|
||||
struct inode *inode;
|
||||
int ret = 0;
|
||||
|
||||
if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
|
||||
goto no_open;
|
||||
goto full_reval;
|
||||
if (d_mountpoint(dentry))
|
||||
goto no_open;
|
||||
if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
|
||||
goto no_open;
|
||||
goto full_reval;
|
||||
|
||||
inode = d_inode(dentry);
|
||||
|
||||
/* We can't create new files in nfs_open_revalidate(), so we
|
||||
* optimize away revalidation of negative dentries.
|
||||
*/
|
||||
if (inode == NULL) {
|
||||
struct dentry *parent;
|
||||
struct inode *dir;
|
||||
if (inode == NULL)
|
||||
goto full_reval;
|
||||
|
||||
if (flags & LOOKUP_RCU) {
|
||||
parent = READ_ONCE(dentry->d_parent);
|
||||
dir = d_inode_rcu(parent);
|
||||
if (!dir)
|
||||
return -ECHILD;
|
||||
} else {
|
||||
parent = dget_parent(dentry);
|
||||
dir = d_inode(parent);
|
||||
}
|
||||
if (!nfs_neg_need_reval(dir, dentry, flags))
|
||||
ret = 1;
|
||||
else if (flags & LOOKUP_RCU)
|
||||
ret = -ECHILD;
|
||||
if (!(flags & LOOKUP_RCU))
|
||||
dput(parent);
|
||||
else if (parent != READ_ONCE(dentry->d_parent))
|
||||
return -ECHILD;
|
||||
goto out;
|
||||
}
|
||||
if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
|
||||
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
|
||||
|
||||
/* NFS only supports OPEN on regular files */
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
goto no_open;
|
||||
goto full_reval;
|
||||
|
||||
/* We cannot do exclusive creation on a positive dentry */
|
||||
if (flags & LOOKUP_EXCL)
|
||||
goto no_open;
|
||||
if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
|
||||
goto reval_dentry;
|
||||
|
||||
/* Check if the directory changed */
|
||||
if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
|
||||
goto reval_dentry;
|
||||
|
||||
/* Let f_op->open() actually open (and revalidate) the file */
|
||||
ret = 1;
|
||||
return 1;
|
||||
reval_dentry:
|
||||
if (flags & LOOKUP_RCU)
|
||||
return -ECHILD;
|
||||
return nfs_lookup_revalidate_dentry(dir, dentry, inode);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
full_reval:
|
||||
return nfs_do_lookup_revalidate(dir, dentry, flags);
|
||||
}
|
||||
|
||||
no_open:
|
||||
return nfs_lookup_revalidate(dentry, flags);
|
||||
static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
|
||||
{
|
||||
return __nfs_lookup_revalidate(dentry, flags,
|
||||
nfs4_do_lookup_revalidate);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_NFSV4 */
|
||||
|
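Because the fs/nfs/dir.c hunks above interleave removed and added lines without +/- markers, the new __nfs_lookup_revalidate() wrapper they introduce is hard to read in one piece. As best it can be reconstructed from the fragments above (treat this as a sketch rather than an authoritative copy of the final code), the wrapper resolves the parent directory once, runs the protocol-specific revalidation callback, and in RCU-walk mode re-checks that the parent did not change underneath it, returning -ECHILD whenever it cannot proceed locklessly:

static int
__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
			int (*reval)(struct inode *, struct dentry *, unsigned int))
{
	struct dentry *parent;
	struct inode *dir;
	int ret;

	if (flags & LOOKUP_RCU) {
		/* RCU walk: no references taken, so re-validate the parent. */
		parent = READ_ONCE(dentry->d_parent);
		dir = d_inode_rcu(parent);
		if (!dir)
			return -ECHILD;
		ret = reval(dir, dentry, flags);
		if (parent != READ_ONCE(dentry->d_parent))
			return -ECHILD;
	} else {
		/* Normal walk: pin the parent across the callback. */
		parent = dget_parent(dentry);
		ret = reval(d_inode(parent), dentry, flags);
		dput(parent);
	}
	return ret;
}

Both nfs_lookup_revalidate() and nfs4_lookup_revalidate() then become thin callers of this helper, passing nfs_do_lookup_revalidate() or nfs4_do_lookup_revalidate() respectively.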
fs/nfs/filelayout/filelayout.c

@@ -1164,6 +1164,7 @@ static struct pnfs_layoutdriver_type filelayout_type = {
 	.id = LAYOUT_NFSV4_1_FILES,
 	.name = "LAYOUT_NFSV4_1_FILES",
 	.owner = THIS_MODULE,
+	.max_layoutget_response = 4096, /* 1 page or so... */
 	.alloc_layout_hdr = filelayout_alloc_layout_hdr,
 	.free_layout_hdr = filelayout_free_layout_hdr,
 	.alloc_lseg = filelayout_alloc_lseg,
fs/nfs/flexfilelayout/flexfilelayout.c

@@ -2356,6 +2356,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
 	.name = "LAYOUT_FLEX_FILES",
 	.owner = THIS_MODULE,
 	.flags = PNFS_LAYOUTGET_ON_OPEN,
+	.max_layoutget_response = 4096, /* 1 page or so... */
 	.set_layoutdriver = ff_layout_set_layoutdriver,
 	.alloc_layout_hdr = ff_layout_alloc_layout_hdr,
 	.free_layout_hdr = ff_layout_free_layout_hdr,
fs/nfs/flexfilelayout/flexfilelayoutdev.c

@@ -453,7 +453,7 @@ ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
 	struct rpc_cred *cred;
 
-	if (mirror) {
+	if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
 		cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
 		if (!cred)
 			cred = get_rpccred(mdscred);
@ -857,15 +857,14 @@ static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
|
||||
|
||||
static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
|
||||
{
|
||||
struct nfs_lock_context *head = &ctx->lock_context;
|
||||
struct nfs_lock_context *pos = head;
|
||||
struct nfs_lock_context *pos;
|
||||
|
||||
do {
|
||||
list_for_each_entry_rcu(pos, &ctx->lock_context.list, list) {
|
||||
if (pos->lockowner != current->files)
|
||||
continue;
|
||||
refcount_inc(&pos->count);
|
||||
return pos;
|
||||
} while ((pos = list_entry(pos->list.next, typeof(*pos), list)) != head);
|
||||
if (refcount_inc_not_zero(&pos->count))
|
||||
return pos;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -874,10 +873,10 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
|
||||
struct nfs_lock_context *res, *new = NULL;
|
||||
struct inode *inode = d_inode(ctx->dentry);
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
rcu_read_lock();
|
||||
res = __nfs_find_lock_context(ctx);
|
||||
rcu_read_unlock();
|
||||
if (res == NULL) {
|
||||
spin_unlock(&inode->i_lock);
|
||||
new = kmalloc(sizeof(*new), GFP_KERNEL);
|
||||
if (new == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@ -885,14 +884,14 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
|
||||
spin_lock(&inode->i_lock);
|
||||
res = __nfs_find_lock_context(ctx);
|
||||
if (res == NULL) {
|
||||
list_add_tail(&new->list, &ctx->lock_context.list);
|
||||
list_add_tail_rcu(&new->list, &ctx->lock_context.list);
|
||||
new->open_context = ctx;
|
||||
res = new;
|
||||
new = NULL;
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
kfree(new);
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
kfree(new);
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nfs_get_lock_context);
|
||||
@ -904,9 +903,9 @@ void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
|
||||
|
||||
if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
|
||||
return;
|
||||
list_del(&l_ctx->list);
|
||||
list_del_rcu(&l_ctx->list);
|
||||
spin_unlock(&inode->i_lock);
|
||||
kfree(l_ctx);
|
||||
kfree_rcu(l_ctx, rcu_head);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nfs_put_lock_context);
|
||||
|
||||
@ -978,9 +977,9 @@ EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
|
||||
|
||||
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
|
||||
{
|
||||
if (ctx != NULL)
|
||||
refcount_inc(&ctx->lock_context.count);
|
||||
return ctx;
|
||||
if (ctx != NULL && refcount_inc_not_zero(&ctx->lock_context.count))
|
||||
return ctx;
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_nfs_open_context);
|
||||
|
||||
@ -989,13 +988,13 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
|
||||
struct inode *inode = d_inode(ctx->dentry);
|
||||
struct super_block *sb = ctx->dentry->d_sb;
|
||||
|
||||
if (!list_empty(&ctx->list)) {
|
||||
if (!refcount_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
|
||||
return;
|
||||
list_del(&ctx->list);
|
||||
spin_unlock(&inode->i_lock);
|
||||
} else if (!refcount_dec_and_test(&ctx->lock_context.count))
|
||||
if (!refcount_dec_and_test(&ctx->lock_context.count))
|
||||
return;
|
||||
if (!list_empty(&ctx->list)) {
|
||||
spin_lock(&inode->i_lock);
|
||||
list_del_rcu(&ctx->list);
|
||||
spin_unlock(&inode->i_lock);
|
||||
}
|
||||
if (inode != NULL)
|
||||
NFS_PROTO(inode)->close_context(ctx, is_sync);
|
||||
if (ctx->cred != NULL)
|
||||
@ -1003,7 +1002,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
|
||||
dput(ctx->dentry);
|
||||
nfs_sb_deactive(sb);
|
||||
kfree(ctx->mdsthreshold);
|
||||
kfree(ctx);
|
||||
kfree_rcu(ctx, rcu_head);
|
||||
}
|
||||
|
||||
void put_nfs_open_context(struct nfs_open_context *ctx)
|
||||
@ -1027,10 +1026,7 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
if (ctx->mode & FMODE_WRITE)
|
||||
list_add(&ctx->list, &nfsi->open_files);
|
||||
else
|
||||
list_add_tail(&ctx->list, &nfsi->open_files);
|
||||
list_add_tail_rcu(&ctx->list, &nfsi->open_files);
|
||||
spin_unlock(&inode->i_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
|
||||
@ -1051,16 +1047,17 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct nfs_open_context *pos, *ctx = NULL;
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
list_for_each_entry(pos, &nfsi->open_files, list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(pos, &nfsi->open_files, list) {
|
||||
if (cred != NULL && pos->cred != cred)
|
||||
continue;
|
||||
if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
|
||||
continue;
|
||||
ctx = get_nfs_open_context(pos);
|
||||
break;
|
||||
if (ctx)
|
||||
break;
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
rcu_read_unlock();
|
||||
return ctx;
|
||||
}
|
||||
|
||||
@ -1078,9 +1075,6 @@ void nfs_file_clear_open_context(struct file *filp)
|
||||
if (ctx->error < 0)
|
||||
invalidate_inode_pages2(inode->i_mapping);
|
||||
filp->private_data = NULL;
|
||||
spin_lock(&inode->i_lock);
|
||||
list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
|
||||
spin_unlock(&inode->i_lock);
|
||||
put_nfs_open_context_sync(ctx);
|
||||
}
|
||||
}
|
||||
@ -1329,19 +1323,11 @@ static bool nfs_file_has_writers(struct nfs_inode *nfsi)
|
||||
{
|
||||
struct inode *inode = &nfsi->vfs_inode;
|
||||
|
||||
assert_spin_locked(&inode->i_lock);
|
||||
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
return false;
|
||||
if (list_empty(&nfsi->open_files))
|
||||
return false;
|
||||
/* Note: This relies on nfsi->open_files being ordered with writers
|
||||
* being placed at the head of the list.
|
||||
* See nfs_inode_attach_open_context()
|
||||
*/
|
||||
return (list_first_entry(&nfsi->open_files,
|
||||
struct nfs_open_context,
|
||||
list)->mode & FMODE_WRITE) == FMODE_WRITE;
|
||||
return inode_is_open_for_write(inode);
|
||||
}
|
||||
|
||||
static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi)
|
||||
|
@ -786,6 +786,7 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
|
||||
static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
|
||||
{
|
||||
struct inode *inode = hdr->inode;
|
||||
struct nfs_server *server = NFS_SERVER(inode);
|
||||
|
||||
if (hdr->pgio_done_cb != NULL)
|
||||
return hdr->pgio_done_cb(task, hdr);
|
||||
@ -793,6 +794,9 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
|
||||
if (nfs3_async_handle_jukebox(task, inode))
|
||||
return -EAGAIN;
|
||||
|
||||
if (task->tk_status >= 0 && !server->read_hdrsize)
|
||||
cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
|
||||
|
||||
nfs_invalidate_atime(inode);
|
||||
nfs_refresh_inode(inode, &hdr->fattr);
|
||||
return 0;
|
||||
@ -802,6 +806,7 @@ static void nfs3_proc_read_setup(struct nfs_pgio_header *hdr,
|
||||
struct rpc_message *msg)
|
||||
{
|
||||
msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ];
|
||||
hdr->args.replen = NFS_SERVER(hdr->inode)->read_hdrsize;
|
||||
}
|
||||
|
||||
static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task,
|
||||
|
@ -983,10 +983,11 @@ static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
|
||||
const void *data)
|
||||
{
|
||||
const struct nfs_pgio_args *args = data;
|
||||
unsigned int replen = args->replen ? args->replen : NFS3_readres_sz;
|
||||
|
||||
encode_read3args(xdr, args);
|
||||
prepare_reply_buffer(req, args->pages, args->pgbase,
|
||||
args->count, NFS3_readres_sz);
|
||||
args->count, replen);
|
||||
req->rq_rcv_buf.flags |= XDRBUF_READ;
|
||||
}
|
||||
|
||||
@ -1364,10 +1365,12 @@ static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
|
||||
|
||||
encode_nfs_fh3(xdr, args->fh);
|
||||
encode_uint32(xdr, args->mask);
|
||||
if (args->mask & (NFS_ACL | NFS_DFACL))
|
||||
if (args->mask & (NFS_ACL | NFS_DFACL)) {
|
||||
prepare_reply_buffer(req, args->pages, 0,
|
||||
NFSACL_MAXPAGES << PAGE_SHIFT,
|
||||
ACL3_getaclres_sz);
|
||||
req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
|
||||
}
|
||||
}
|
||||
|
||||
static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
|
||||
@ -1673,9 +1676,11 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
|
||||
void *data)
|
||||
{
|
||||
struct nfs_pgio_res *result = data;
|
||||
unsigned int pos;
|
||||
enum nfs_stat status;
|
||||
int error;
|
||||
|
||||
pos = xdr_stream_pos(xdr);
|
||||
error = decode_nfsstat3(xdr, &status);
|
||||
if (unlikely(error))
|
||||
goto out;
|
||||
@ -1685,6 +1690,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
|
||||
result->op_status = status;
|
||||
if (status != NFS3_OK)
|
||||
goto out_status;
|
||||
result->replen = 3 + ((xdr_stream_pos(xdr) - pos) >> 2);
|
||||
error = decode_read3resok(xdr, result);
|
||||
out:
|
||||
return error;
|
||||
|
@ -188,9 +188,10 @@ struct nfs4_state {
|
||||
unsigned int n_wronly; /* Number of write-only references */
|
||||
unsigned int n_rdwr; /* Number of read/write references */
|
||||
fmode_t state; /* State on the server (R,W, or RW) */
|
||||
atomic_t count;
|
||||
refcount_t count;
|
||||
|
||||
wait_queue_head_t waitq;
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
|
||||
|
@ -950,10 +950,10 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
|
||||
|
||||
/*
|
||||
* Session has been established, and the client marked ready.
|
||||
* Set the mount rsize and wsize with negotiated fore channel
|
||||
* attributes which will be bound checked in nfs_server_set_fsinfo.
|
||||
* Limit the mount rsize, wsize and dtsize using negotiated fore
|
||||
* channel attributes.
|
||||
*/
|
||||
static void nfs4_session_set_rwsize(struct nfs_server *server)
|
||||
static void nfs4_session_limit_rwsize(struct nfs_server *server)
|
||||
{
|
||||
#ifdef CONFIG_NFS_V4_1
|
||||
struct nfs4_session *sess;
|
||||
@ -966,9 +966,11 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
|
||||
server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
|
||||
server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
|
||||
|
||||
if (!server->rsize || server->rsize > server_resp_sz)
|
||||
if (server->dtsize > server_resp_sz)
|
||||
server->dtsize = server_resp_sz;
|
||||
if (server->rsize > server_resp_sz)
|
||||
server->rsize = server_resp_sz;
|
||||
if (!server->wsize || server->wsize > server_rqst_sz)
|
||||
if (server->wsize > server_rqst_sz)
|
||||
server->wsize = server_rqst_sz;
|
||||
#endif /* CONFIG_NFS_V4_1 */
|
||||
}
|
||||
@ -1015,12 +1017,12 @@ static int nfs4_server_common_setup(struct nfs_server *server,
|
||||
(unsigned long long) server->fsid.minor);
|
||||
nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
|
||||
|
||||
nfs4_session_set_rwsize(server);
|
||||
|
||||
error = nfs_probe_fsinfo(server, mntfh, fattr);
|
||||
if (error < 0)
|
||||
goto out;
|
||||
|
||||
nfs4_session_limit_rwsize(server);
|
||||
|
||||
if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
|
||||
server->namelen = NFS4_MAXNAMLEN;
|
||||
|
||||
|
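The two nfs4client.c hunks above change when and how the mount's I/O sizes are bounded: instead of seeding rsize/wsize before probing the filesystem, nfs_probe_fsinfo() now runs first and nfs4_session_limit_rwsize() afterwards caps rsize, wsize and dtsize by what the session's fore channel actually negotiated. A compressed sketch of the resulting logic follows; the overhead parameters stand in for the real nfs41_maxread_overhead/nfs41_maxwrite_overhead values and the demo_ name is illustrative.

static void demo_session_limit_rwsize(struct nfs_server *server,
				      unsigned int max_resp_sz,
				      unsigned int max_rqst_sz,
				      unsigned int read_overhead,
				      unsigned int write_overhead)
{
	/* Space left for data once reply/request headers are accounted for. */
	unsigned int server_resp_sz = max_resp_sz - read_overhead;
	unsigned int server_rqst_sz = max_rqst_sz - write_overhead;

	/* Clamp downwards only: the values probed by nfs_probe_fsinfo() are
	 * kept when they already fit what the fore channel negotiated. */
	if (server->dtsize > server_resp_sz)
		server->dtsize = server_resp_sz;
	if (server->rsize > server_resp_sz)
		server->rsize = server_resp_sz;
	if (server->wsize > server_rqst_sz)
		server->wsize = server_rqst_sz;
}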
@ -1349,12 +1349,20 @@ static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
|
||||
return false;
|
||||
}
|
||||
|
||||
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
|
||||
static int can_open_cached(struct nfs4_state *state, fmode_t mode,
|
||||
int open_mode, enum open_claim_type4 claim)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (open_mode & (O_EXCL|O_TRUNC))
|
||||
goto out;
|
||||
switch (claim) {
|
||||
case NFS4_OPEN_CLAIM_NULL:
|
||||
case NFS4_OPEN_CLAIM_FH:
|
||||
goto out;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
switch (mode & (FMODE_READ|FMODE_WRITE)) {
|
||||
case FMODE_READ:
|
||||
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
|
||||
@ -1747,7 +1755,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
|
||||
|
||||
for (;;) {
|
||||
spin_lock(&state->owner->so_lock);
|
||||
if (can_open_cached(state, fmode, open_mode)) {
|
||||
if (can_open_cached(state, fmode, open_mode, claim)) {
|
||||
update_open_stateflags(state, fmode);
|
||||
spin_unlock(&state->owner->so_lock);
|
||||
goto out_return_state;
|
||||
@ -1777,7 +1785,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
|
||||
out:
|
||||
return ERR_PTR(ret);
|
||||
out_return_state:
|
||||
atomic_inc(&state->count);
|
||||
refcount_inc(&state->count);
|
||||
return state;
|
||||
}
|
||||
|
||||
@ -1849,7 +1857,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
|
||||
update:
|
||||
update_open_stateid(state, &data->o_res.stateid, NULL,
|
||||
data->o_arg.fmode);
|
||||
atomic_inc(&state->count);
|
||||
refcount_inc(&state->count);
|
||||
|
||||
return state;
|
||||
}
|
||||
@ -1887,7 +1895,7 @@ nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
|
||||
return ERR_CAST(inode);
|
||||
if (data->state != NULL && data->state->inode == inode) {
|
||||
state = data->state;
|
||||
atomic_inc(&state->count);
|
||||
refcount_inc(&state->count);
|
||||
} else
|
||||
state = nfs4_get_open_state(inode, data->owner);
|
||||
iput(inode);
|
||||
@ -1933,23 +1941,41 @@ nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
|
||||
static struct nfs_open_context *
|
||||
nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
|
||||
{
|
||||
struct nfs_inode *nfsi = NFS_I(state->inode);
|
||||
struct nfs_open_context *ctx;
|
||||
|
||||
spin_lock(&state->inode->i_lock);
|
||||
list_for_each_entry(ctx, &nfsi->open_files, list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
|
||||
if (ctx->state != state)
|
||||
continue;
|
||||
get_nfs_open_context(ctx);
|
||||
spin_unlock(&state->inode->i_lock);
|
||||
if ((ctx->mode & mode) != mode)
|
||||
continue;
|
||||
if (!get_nfs_open_context(ctx))
|
||||
continue;
|
||||
rcu_read_unlock();
|
||||
return ctx;
|
||||
}
|
||||
spin_unlock(&state->inode->i_lock);
|
||||
rcu_read_unlock();
|
||||
return ERR_PTR(-ENOENT);
|
||||
}
|
||||
|
||||
static struct nfs_open_context *
|
||||
nfs4_state_find_open_context(struct nfs4_state *state)
|
||||
{
|
||||
struct nfs_open_context *ctx;
|
||||
|
||||
ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
|
||||
if (!IS_ERR(ctx))
|
||||
return ctx;
|
||||
ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
|
||||
if (!IS_ERR(ctx))
|
||||
return ctx;
|
||||
return nfs4_state_find_open_context_mode(state, FMODE_READ);
|
||||
}
|
||||
|
||||
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
|
||||
struct nfs4_state *state, enum open_claim_type4 claim)
|
||||
{
|
||||
@ -1960,7 +1986,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
|
||||
if (opendata == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
opendata->state = state;
|
||||
atomic_inc(&state->count);
|
||||
refcount_inc(&state->count);
|
||||
return opendata;
|
||||
}
|
||||
|
||||
@ -2276,7 +2302,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
|
||||
if (data->state != NULL) {
|
||||
struct nfs_delegation *delegation;
|
||||
|
||||
if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
|
||||
if (can_open_cached(data->state, data->o_arg.fmode,
|
||||
data->o_arg.open_flags, claim))
|
||||
goto out_no_action;
|
||||
rcu_read_lock();
|
||||
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
|
||||
|
@ -655,7 +655,7 @@ nfs4_alloc_open_state(void)
|
||||
state = kzalloc(sizeof(*state), GFP_NOFS);
|
||||
if (!state)
|
||||
return NULL;
|
||||
atomic_set(&state->count, 1);
|
||||
refcount_set(&state->count, 1);
|
||||
INIT_LIST_HEAD(&state->lock_states);
|
||||
spin_lock_init(&state->state_lock);
|
||||
seqlock_init(&state->seqlock);
|
||||
@ -684,12 +684,12 @@ __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct nfs4_state *state;
|
||||
|
||||
list_for_each_entry(state, &nfsi->open_states, inode_states) {
|
||||
list_for_each_entry_rcu(state, &nfsi->open_states, inode_states) {
|
||||
if (state->owner != owner)
|
||||
continue;
|
||||
if (!nfs4_valid_open_stateid(state))
|
||||
continue;
|
||||
if (atomic_inc_not_zero(&state->count))
|
||||
if (refcount_inc_not_zero(&state->count))
|
||||
return state;
|
||||
}
|
||||
return NULL;
|
||||
@ -698,7 +698,7 @@ __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
|
||||
static void
|
||||
nfs4_free_open_state(struct nfs4_state *state)
|
||||
{
|
||||
kfree(state);
|
||||
kfree_rcu(state, rcu_head);
|
||||
}
|
||||
|
||||
struct nfs4_state *
|
||||
@ -707,9 +707,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
|
||||
struct nfs4_state *state, *new;
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
rcu_read_lock();
|
||||
state = __nfs4_find_state_byowner(inode, owner);
|
||||
spin_unlock(&inode->i_lock);
|
||||
rcu_read_unlock();
|
||||
if (state)
|
||||
goto out;
|
||||
new = nfs4_alloc_open_state();
|
||||
@ -720,7 +720,7 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
|
||||
state = new;
|
||||
state->owner = owner;
|
||||
atomic_inc(&owner->so_count);
|
||||
list_add(&state->inode_states, &nfsi->open_states);
|
||||
list_add_rcu(&state->inode_states, &nfsi->open_states);
|
||||
ihold(inode);
|
||||
state->inode = inode;
|
||||
spin_unlock(&inode->i_lock);
|
||||
@ -743,10 +743,10 @@ void nfs4_put_open_state(struct nfs4_state *state)
|
||||
struct inode *inode = state->inode;
|
||||
struct nfs4_state_owner *owner = state->owner;
|
||||
|
||||
if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
|
||||
if (!refcount_dec_and_lock(&state->count, &owner->so_lock))
|
||||
return;
|
||||
spin_lock(&inode->i_lock);
|
||||
list_del(&state->inode_states);
|
||||
list_del_rcu(&state->inode_states);
|
||||
list_del(&state->open_states);
|
||||
spin_unlock(&inode->i_lock);
|
||||
spin_unlock(&owner->so_lock);
|
||||
@ -1437,8 +1437,8 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
|
||||
struct nfs4_state *state;
|
||||
bool found = false;
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
list_for_each_entry(ctx, &nfsi->open_files, list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
|
||||
state = ctx->state;
|
||||
if (state == NULL)
|
||||
continue;
|
||||
@ -1456,7 +1456,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
|
||||
nfs4_state_mark_reclaim_nograce(clp, state))
|
||||
found = true;
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
nfs_inode_find_delegation_state_and_recover(inode, stateid);
|
||||
if (found)
|
||||
@ -1469,13 +1469,13 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
|
||||
struct nfs_inode *nfsi = NFS_I(inode);
|
||||
struct nfs_open_context *ctx;
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
list_for_each_entry(ctx, &nfsi->open_files, list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
|
||||
if (ctx->state != state)
|
||||
continue;
|
||||
set_bit(NFS_CONTEXT_BAD, &ctx->flags);
|
||||
}
|
||||
spin_unlock(&inode->i_lock);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
|
||||
@ -1549,10 +1549,62 @@ out:
|
||||
return status;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NFS_V4_2
|
||||
static void nfs42_complete_copies(struct nfs4_state_owner *sp, struct nfs4_state *state)
|
||||
{
|
||||
struct nfs4_copy_state *copy;
|
||||
|
||||
if (!test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags))
|
||||
return;
|
||||
|
||||
spin_lock(&sp->so_server->nfs_client->cl_lock);
|
||||
list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
|
||||
if (nfs4_stateid_match_other(&state->stateid, &copy->parent_state->stateid))
|
||||
continue;
|
||||
copy->flags = 1;
|
||||
complete(&copy->completion);
|
||||
break;
|
||||
}
|
||||
spin_unlock(&sp->so_server->nfs_client->cl_lock);
|
||||
}
|
||||
#else /* !CONFIG_NFS_V4_2 */
|
||||
static inline void nfs42_complete_copies(struct nfs4_state_owner *sp,
|
||||
struct nfs4_state *state)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_NFS_V4_2 */
|
||||
|
||||
static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_state *state,
|
||||
const struct nfs4_state_recovery_ops *ops)
|
||||
{
|
||||
struct nfs4_lock_state *lock;
|
||||
int status;
|
||||
|
||||
status = ops->recover_open(sp, state);
|
||||
if (status < 0)
|
||||
return status;
|
||||
|
||||
status = nfs4_reclaim_locks(state, ops);
|
||||
if (status < 0)
|
||||
return status;
|
||||
|
||||
if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
|
||||
spin_lock(&state->state_lock);
|
||||
list_for_each_entry(lock, &state->lock_states, ls_locks) {
|
||||
if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
|
||||
pr_warn_ratelimited("NFS: %s: Lock reclaim failed!\n", __func__);
|
||||
}
|
||||
spin_unlock(&state->state_lock);
|
||||
}
|
||||
|
||||
nfs42_complete_copies(sp, state);
|
||||
clear_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
|
||||
return status;
|
||||
}
|
||||
|
||||
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
|
||||
{
|
||||
struct nfs4_state *state;
|
||||
struct nfs4_lock_state *lock;
|
||||
int status = 0;
|
||||
|
||||
/* Note: we rely on the sp->so_states list being ordered
|
||||
@ -1573,79 +1625,45 @@ restart:
|
||||
continue;
|
||||
if (state->state == 0)
|
||||
continue;
|
||||
atomic_inc(&state->count);
|
||||
refcount_inc(&state->count);
|
||||
spin_unlock(&sp->so_lock);
|
||||
status = ops->recover_open(sp, state);
|
||||
if (status >= 0) {
|
||||
status = nfs4_reclaim_locks(state, ops);
|
||||
if (status >= 0) {
|
||||
if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
|
||||
spin_lock(&state->state_lock);
|
||||
list_for_each_entry(lock, &state->lock_states, ls_locks) {
|
||||
if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
|
||||
pr_warn_ratelimited("NFS: "
|
||||
"%s: Lock reclaim "
|
||||
"failed!\n", __func__);
|
||||
}
|
||||
spin_unlock(&state->state_lock);
|
||||
}
|
||||
clear_bit(NFS_STATE_RECLAIM_NOGRACE,
|
||||
&state->flags);
|
||||
#ifdef CONFIG_NFS_V4_2
|
||||
if (test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags)) {
|
||||
struct nfs4_copy_state *copy;
|
||||
status = __nfs4_reclaim_open_state(sp, state, ops);
|
||||
|
||||
spin_lock(&sp->so_server->nfs_client->cl_lock);
|
||||
list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
|
||||
if (memcmp(&state->stateid.other, &copy->parent_state->stateid.other, NFS4_STATEID_SIZE))
|
||||
continue;
|
||||
copy->flags = 1;
|
||||
complete(&copy->completion);
|
||||
printk("AGLO: server rebooted waking up the copy\n");
|
||||
break;
|
||||
}
|
||||
spin_unlock(&sp->so_server->nfs_client->cl_lock);
|
||||
}
|
||||
#endif /* CONFIG_NFS_V4_2 */
|
||||
nfs4_put_open_state(state);
|
||||
spin_lock(&sp->so_lock);
|
||||
goto restart;
|
||||
}
|
||||
}
|
||||
switch (status) {
|
||||
default:
|
||||
printk(KERN_ERR "NFS: %s: unhandled error %d\n",
|
||||
__func__, status);
|
||||
/* Fall through */
|
||||
case -ENOENT:
|
||||
case -ENOMEM:
|
||||
case -EACCES:
|
||||
case -EROFS:
|
||||
case -EIO:
|
||||
case -ESTALE:
|
||||
/* Open state on this file cannot be recovered */
|
||||
nfs4_state_mark_recovery_failed(state, status);
|
||||
default:
|
||||
if (status >= 0)
|
||||
break;
|
||||
case -EAGAIN:
|
||||
ssleep(1);
|
||||
/* Fall through */
|
||||
case -NFS4ERR_ADMIN_REVOKED:
|
||||
case -NFS4ERR_STALE_STATEID:
|
||||
case -NFS4ERR_OLD_STATEID:
|
||||
case -NFS4ERR_BAD_STATEID:
|
||||
case -NFS4ERR_RECLAIM_BAD:
|
||||
case -NFS4ERR_RECLAIM_CONFLICT:
|
||||
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
|
||||
break;
|
||||
case -NFS4ERR_EXPIRED:
|
||||
case -NFS4ERR_NO_GRACE:
|
||||
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
case -NFS4ERR_BADSESSION:
|
||||
case -NFS4ERR_BADSLOT:
|
||||
case -NFS4ERR_BAD_HIGH_SLOT:
|
||||
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
||||
goto out_err;
|
||||
printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
|
||||
/* Fall through */
|
||||
case -ENOENT:
|
||||
case -ENOMEM:
|
||||
case -EACCES:
|
||||
case -EROFS:
|
||||
case -EIO:
|
||||
case -ESTALE:
|
||||
/* Open state on this file cannot be recovered */
|
||||
nfs4_state_mark_recovery_failed(state, status);
|
||||
break;
|
||||
case -EAGAIN:
|
||||
ssleep(1);
|
||||
/* Fall through */
|
||||
case -NFS4ERR_ADMIN_REVOKED:
|
||||
case -NFS4ERR_STALE_STATEID:
|
||||
case -NFS4ERR_OLD_STATEID:
|
||||
case -NFS4ERR_BAD_STATEID:
|
||||
case -NFS4ERR_RECLAIM_BAD:
|
||||
case -NFS4ERR_RECLAIM_CONFLICT:
|
||||
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
|
||||
break;
|
||||
case -NFS4ERR_EXPIRED:
|
||||
case -NFS4ERR_NO_GRACE:
|
||||
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
case -NFS4ERR_BADSESSION:
|
||||
case -NFS4ERR_BADSLOT:
|
||||
case -NFS4ERR_BAD_HIGH_SLOT:
|
||||
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
||||
goto out_err;
|
||||
}
|
||||
nfs4_put_open_state(state);
|
||||
spin_lock(&sp->so_lock);
|
||||
@ -1795,38 +1813,38 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
|
||||
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
|
||||
{
|
||||
switch (error) {
|
||||
case 0:
|
||||
break;
|
||||
case -NFS4ERR_CB_PATH_DOWN:
|
||||
nfs40_handle_cb_pathdown(clp);
|
||||
break;
|
||||
case -NFS4ERR_NO_GRACE:
|
||||
nfs4_state_end_reclaim_reboot(clp);
|
||||
break;
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
nfs4_state_start_reclaim_reboot(clp);
|
||||
break;
|
||||
case -NFS4ERR_EXPIRED:
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
nfs4_state_start_reclaim_nograce(clp);
|
||||
break;
|
||||
case -NFS4ERR_BADSESSION:
|
||||
case -NFS4ERR_BADSLOT:
|
||||
case -NFS4ERR_BAD_HIGH_SLOT:
|
||||
case -NFS4ERR_DEADSESSION:
|
||||
case -NFS4ERR_SEQ_FALSE_RETRY:
|
||||
case -NFS4ERR_SEQ_MISORDERED:
|
||||
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
|
||||
/* Zero session reset errors */
|
||||
break;
|
||||
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
||||
set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
|
||||
break;
|
||||
default:
|
||||
dprintk("%s: failed to handle error %d for server %s\n",
|
||||
__func__, error, clp->cl_hostname);
|
||||
return error;
|
||||
case 0:
|
||||
break;
|
||||
case -NFS4ERR_CB_PATH_DOWN:
|
||||
nfs40_handle_cb_pathdown(clp);
|
||||
break;
|
||||
case -NFS4ERR_NO_GRACE:
|
||||
nfs4_state_end_reclaim_reboot(clp);
|
||||
break;
|
||||
case -NFS4ERR_STALE_CLIENTID:
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
nfs4_state_start_reclaim_reboot(clp);
|
||||
break;
|
||||
case -NFS4ERR_EXPIRED:
|
||||
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
|
||||
nfs4_state_start_reclaim_nograce(clp);
|
||||
break;
|
||||
case -NFS4ERR_BADSESSION:
|
||||
case -NFS4ERR_BADSLOT:
|
||||
case -NFS4ERR_BAD_HIGH_SLOT:
|
||||
case -NFS4ERR_DEADSESSION:
|
||||
case -NFS4ERR_SEQ_FALSE_RETRY:
|
||||
case -NFS4ERR_SEQ_MISORDERED:
|
||||
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
|
||||
/* Zero session reset errors */
|
||||
break;
|
||||
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
||||
set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
|
||||
break;
|
||||
default:
|
||||
dprintk("%s: failed to handle error %d for server %s\n",
|
||||
__func__, error, clp->cl_hostname);
|
||||
return error;
|
||||
}
|
||||
dprintk("%s: handled error %d for server %s\n", __func__, error,
|
||||
clp->cl_hostname);
|
||||
|
@ -3516,7 +3516,7 @@ static int decode_attr_exclcreat_supported(struct xdr_stream *xdr,
|
||||
static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh)
|
||||
{
|
||||
__be32 *p;
|
||||
int len;
|
||||
u32 len;
|
||||
|
||||
if (fh != NULL)
|
||||
memset(fh, 0, sizeof(*fh));
|
||||
|
@ -63,14 +63,14 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
|
||||
|
||||
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
|
||||
{
|
||||
spin_lock(&hdr->lock);
|
||||
if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
|
||||
|| pos < hdr->io_start + hdr->good_bytes) {
|
||||
unsigned int new = pos - hdr->io_start;
|
||||
|
||||
if (hdr->good_bytes > new) {
|
||||
hdr->good_bytes = new;
|
||||
clear_bit(NFS_IOHDR_EOF, &hdr->flags);
|
||||
hdr->good_bytes = pos - hdr->io_start;
|
||||
hdr->error = error;
|
||||
if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
|
||||
hdr->error = error;
|
||||
}
|
||||
spin_unlock(&hdr->lock);
|
||||
}
|
||||
|
||||
static inline struct nfs_page *
|
||||
@ -494,7 +494,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
|
||||
|
||||
if (hdr) {
|
||||
INIT_LIST_HEAD(&hdr->pages);
|
||||
spin_lock_init(&hdr->lock);
|
||||
hdr->rw_ops = ops;
|
||||
}
|
||||
return hdr;
|
||||
@ -1111,6 +1110,20 @@ static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
|
||||
{
|
||||
u32 midx;
|
||||
struct nfs_pgio_mirror *mirror;
|
||||
|
||||
if (!desc->pg_error)
|
||||
return;
|
||||
|
||||
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
|
||||
mirror = &desc->pg_mirrors[midx];
|
||||
desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
|
||||
}
|
||||
}
|
||||
|
||||
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
|
||||
struct nfs_page *req)
|
||||
{
|
||||
@ -1161,25 +1174,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
|
||||
return 1;
|
||||
|
||||
out_failed:
|
||||
/*
|
||||
* We might have failed before sending any reqs over wire.
|
||||
* Clean up rest of the reqs in mirror pg_list.
|
||||
*/
|
||||
if (desc->pg_error) {
|
||||
struct nfs_pgio_mirror *mirror;
|
||||
void (*func)(struct list_head *);
|
||||
|
||||
/* remember fatal errors */
|
||||
if (nfs_error_is_fatal(desc->pg_error))
|
||||
nfs_context_set_write_error(req->wb_context,
|
||||
desc->pg_error);
|
||||
|
||||
func = desc->pg_completion_ops->error_cleanup;
|
||||
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
|
||||
mirror = &desc->pg_mirrors[midx];
|
||||
func(&mirror->pg_list);
|
||||
}
|
||||
}
|
||||
nfs_pageio_error_cleanup(desc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1251,6 +1246,8 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
|
||||
for (midx = 0; midx < desc->pg_mirror_count; midx++)
|
||||
nfs_pageio_complete_mirror(desc, midx);
|
||||
|
||||
if (desc->pg_error < 0)
|
||||
nfs_pageio_error_cleanup(desc);
|
||||
if (desc->pg_ops->pg_cleanup)
|
||||
desc->pg_ops->pg_cleanup(desc);
|
||||
nfs_pageio_cleanup_mirroring(desc);
|
||||
|
@ -965,7 +965,7 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
|
||||
struct page **pages;
|
||||
int i;
|
||||
|
||||
pages = kcalloc(size, sizeof(struct page *), gfp_flags);
|
||||
pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
|
||||
if (!pages) {
|
||||
dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
|
||||
return NULL;
|
||||
@ -975,7 +975,7 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
|
||||
pages[i] = alloc_page(gfp_flags);
|
||||
if (!pages[i]) {
|
||||
dprintk("%s: failed to allocate page\n", __func__);
|
||||
nfs4_free_pages(pages, size);
|
||||
nfs4_free_pages(pages, i);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
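The nfs4_alloc_pages() hunks above switch the array allocation from kcalloc() to kmalloc_array() and, on a partial failure, free only the i pages that were actually allocated rather than walking the full (now uninitialised) array. A stand-alone sketch of the corrected pattern, with demo_* names standing in for the real helpers:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct page **demo_alloc_pages(size_t size, gfp_t gfp_flags)
{
	struct page **pages;
	size_t i;

	/* Uninitialised array: entries past a failure point are garbage. */
	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
	if (!pages)
		return NULL;

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			/* Free only the pages allocated so far, then the array. */
			while (i--)
				__free_page(pages[i]);
			kfree(pages);
			return NULL;
		}
	}
	return pages;
}

The related pnfs_alloc_init_layoutget_args() hunk uses the same allocation but first shrinks max_pages when the layout driver advertises a smaller max_layoutget_response, which is why the new .max_layoutget_response fields appear in the layout driver hunks earlier in this diff.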
@ -991,6 +991,7 @@ pnfs_alloc_init_layoutget_args(struct inode *ino,
|
||||
gfp_t gfp_flags)
|
||||
{
|
||||
struct nfs_server *server = pnfs_find_server(ino, ctx);
|
||||
size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
|
||||
size_t max_pages = max_response_pages(server);
|
||||
struct nfs4_layoutget *lgp;
|
||||
|
||||
@ -1000,6 +1001,12 @@ pnfs_alloc_init_layoutget_args(struct inode *ino,
|
||||
if (lgp == NULL)
|
||||
return NULL;
|
||||
|
||||
if (max_reply_sz) {
|
||||
size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
if (npages < max_pages)
|
||||
max_pages = npages;
|
||||
}
|
||||
|
||||
lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
|
||||
if (!lgp->args.layout.pages) {
|
||||
kfree(lgp);
|
||||
@ -1332,6 +1339,7 @@ bool pnfs_roc(struct inode *ino,
|
||||
if (!nfs_have_layout(ino))
|
||||
return false;
|
||||
retry:
|
||||
rcu_read_lock();
|
||||
spin_lock(&ino->i_lock);
|
||||
lo = nfsi->layout;
|
||||
if (!lo || !pnfs_layout_is_valid(lo) ||
|
||||
@ -1342,6 +1350,7 @@ retry:
|
||||
pnfs_get_layout_hdr(lo);
|
||||
if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
|
||||
spin_unlock(&ino->i_lock);
|
||||
rcu_read_unlock();
|
||||
wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
pnfs_put_layout_hdr(lo);
|
||||
@ -1355,7 +1364,7 @@ retry:
|
||||
skip_read = true;
|
||||
}
|
||||
|
||||
list_for_each_entry(ctx, &nfsi->open_files, list) {
|
||||
list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
|
||||
state = ctx->state;
|
||||
if (state == NULL)
|
||||
continue;
|
||||
@ -1403,6 +1412,7 @@ retry:
|
||||
|
||||
out_noroc:
|
||||
spin_unlock(&ino->i_lock);
|
||||
rcu_read_unlock();
|
||||
pnfs_layoutcommit_inode(ino, true);
|
||||
if (roc) {
|
||||
struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
|
||||
|
@ -125,6 +125,7 @@ struct pnfs_layoutdriver_type {
|
||||
struct module *owner;
|
||||
unsigned flags;
|
||||
unsigned max_deviceinfo_size;
|
||||
unsigned max_layoutget_response;
|
||||
|
||||
int (*set_layoutdriver) (struct nfs_server *, const struct nfs_fh *);
|
||||
int (*clear_layoutdriver) (struct nfs_server *);
|
||||
|
@ -276,16 +276,14 @@ static void nfs_readpage_result(struct rpc_task *task,
|
||||
struct nfs_pgio_header *hdr)
|
||||
{
|
||||
if (hdr->res.eof) {
|
||||
loff_t bound;
|
||||
loff_t pos = hdr->args.offset + hdr->res.count;
|
||||
unsigned int new = pos - hdr->io_start;
|
||||
|
||||
bound = hdr->args.offset + hdr->res.count;
|
||||
spin_lock(&hdr->lock);
|
||||
if (bound < hdr->io_start + hdr->good_bytes) {
|
||||
if (hdr->good_bytes > new) {
|
||||
hdr->good_bytes = new;
|
||||
set_bit(NFS_IOHDR_EOF, &hdr->flags);
|
||||
clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
|
||||
hdr->good_bytes = bound - hdr->io_start;
|
||||
}
|
||||
spin_unlock(&hdr->lock);
|
||||
} else if (hdr->res.count < hdr->args.count)
|
||||
nfs_readpage_retry(task, hdr);
|
||||
}
|
||||
|
@ -62,6 +62,7 @@ struct nfs_lock_context {
|
||||
struct nfs_open_context *open_context;
|
||||
fl_owner_t lockowner;
|
||||
atomic_t io_count;
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
struct nfs4_state;
|
||||
@ -82,6 +83,7 @@ struct nfs_open_context {
|
||||
|
||||
struct list_head list;
|
||||
struct nfs4_threshold *mdsthreshold;
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
struct nfs_open_dir_context {
|
||||
|
@ -228,6 +228,9 @@ struct nfs_server {
|
||||
unsigned short mountd_port;
|
||||
unsigned short mountd_protocol;
|
||||
struct rpc_wait_queue uoc_rpcwaitq;
|
||||
|
||||
/* XDR related information */
|
||||
unsigned int read_hdrsize;
|
||||
};
|
||||
|
||||
/* Server capabilities */
|
||||
|
@ -608,8 +608,13 @@ struct nfs_pgio_args {
|
||||
__u32 count;
|
||||
unsigned int pgbase;
|
||||
struct page ** pages;
|
||||
const u32 * bitmask; /* used by write */
|
||||
enum nfs3_stable_how stable; /* used by write */
|
||||
union {
|
||||
unsigned int replen; /* used by read */
|
||||
struct {
|
||||
const u32 * bitmask; /* used by write */
|
||||
enum nfs3_stable_how stable; /* used by write */
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
struct nfs_pgio_res {
|
||||
@ -617,10 +622,16 @@ struct nfs_pgio_res {
|
||||
struct nfs_fattr * fattr;
|
||||
__u32 count;
|
||||
__u32 op_status;
|
||||
int eof; /* used by read */
|
||||
struct nfs_writeverf * verf; /* used by write */
|
||||
const struct nfs_server *server; /* used by write */
|
||||
|
||||
union {
|
||||
struct {
|
||||
unsigned int replen; /* used by read */
|
||||
int eof; /* used by read */
|
||||
};
|
||||
struct {
|
||||
struct nfs_writeverf * verf; /* used by write */
|
||||
const struct nfs_server *server; /* used by write */
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
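The nfs_pgio_args/nfs_pgio_res hunks above fold the read-only and write-only members into anonymous unions, since a single page I/O is either a READ or a WRITE and never needs both sets of fields at once; the struct shrinks without changing any field names used elsewhere. A small, compilable illustration of the same layout trick (field names and types are simplified, not the kernel's):

#include <stdio.h>

struct demo_pgio_res {
	unsigned int count;
	union {
		struct {			/* used by read */
			unsigned int replen;
			int eof;
		};
		struct {			/* used by write */
			const void *verf;
			const void *server;
		};
	};
};

int main(void)
{
	struct demo_pgio_res res = { .count = 4096, .replen = 3, .eof = 1 };

	/* The read-only and write-only members share the same storage. */
	printf("count=%u replen=%u eof=%d size=%zu\n",
	       res.count, res.replen, res.eof, sizeof(res));
	return 0;
}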
/*
|
||||
@@ -1471,11 +1482,10 @@ struct nfs_pgio_header {
const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
spinlock_t lock;
/* fields protected by lock */

int pnfs_error;
int error; /* merge with pnfs_error */
unsigned long good_bytes; /* boundary of good data */
unsigned int good_bytes; /* boundary of good data */
unsigned long flags;

/*

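The nfs_pgio_args and nfs_pgio_res hunks above overlay the read-only and write-only members in anonymous unions, so one pgio structure no longer carries both sets of fields at once. Below is a minimal standalone sketch of the same idiom; the struct and field names are illustrative stand-ins, not the kernel's, and the exact size saving depends on the architecture.

#include <stdio.h>

/* Toy analogue of the nfs_pgio_res layout after the change: the read-only
 * and write-only result fields share storage, because a given request is
 * either a read or a write, never both. */
struct pgio_res_demo {
	unsigned int count;
	union {
		struct {		/* used by read */
			unsigned int replen;
			int eof;
		};
		struct {		/* used by write */
			const void *verf;
			const void *server;
		};
	};
};

int main(void)
{
	struct pgio_res_demo res = { .count = 4096 };

	/* Anonymous unions/structs let callers keep writing res.eof or
	 * res.verf exactly as before; only the storage is shared. */
	res.eof = 1;
	printf("sizeof(struct pgio_res_demo) = %zu, eof = %d\n",
	       sizeof(res), res.eof);
	return 0;
}
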
@@ -67,7 +67,7 @@ struct rpc_cred {
const struct rpc_credops *cr_ops;
unsigned long cr_expire; /* when to gc */
unsigned long cr_flags; /* various flags */
atomic_t cr_count; /* ref count */
refcount_t cr_count; /* ref count */

kuid_t cr_uid;

@@ -100,7 +100,7 @@ struct rpc_auth {
* differ from the flavor in
* au_ops->au_flavor in gss
* case) */
atomic_t au_count; /* Reference counter */
refcount_t au_count; /* Reference counter */

struct rpc_cred_cache * au_credcache;
/* per-flavor data */
@@ -157,6 +157,7 @@ struct rpc_credops {
int (*crkey_timeout)(struct rpc_cred *);
bool (*crkey_to_expire)(struct rpc_cred *);
char * (*crstringify_acceptor)(struct rpc_cred *);
bool (*crneed_reencode)(struct rpc_task *);
};

extern const struct rpc_authops authunix_ops;
@@ -192,6 +193,7 @@ __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
bool rpcauth_xmit_need_reencode(struct rpc_task *task);
int rpcauth_refreshcred(struct rpc_task *);
void rpcauth_invalcred(struct rpc_task *);
int rpcauth_uptodatecred(struct rpc_task *);
@@ -204,11 +206,11 @@ bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *);
char * rpcauth_stringify_acceptor(struct rpc_cred *);

static inline
struct rpc_cred * get_rpccred(struct rpc_cred *cred)
struct rpc_cred *get_rpccred(struct rpc_cred *cred)
{
if (cred != NULL)
atomic_inc(&cred->cr_count);
return cred;
if (cred != NULL && refcount_inc_not_zero(&cred->cr_count))
return cred;
return NULL;
}

/**
@@ -224,9 +226,7 @@ struct rpc_cred * get_rpccred(struct rpc_cred *cred)
static inline struct rpc_cred *
get_rpccred_rcu(struct rpc_cred *cred)
{
if (atomic_inc_not_zero(&cred->cr_count))
return cred;
return NULL;
return get_rpccred(cred);
}

#endif /* __KERNEL__ */

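get_rpccred() above now takes a reference only if the credential's count has not already dropped to zero, which is the property that makes the lockless RCU lookups elsewhere in this series safe. A userspace sketch of that "get only if still live" idiom is shown below, using C11 atomics in place of the kernel's refcount_t; the names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct cred_demo {
	atomic_uint count;
	const char *name;
};

/* Analogue of refcount_inc_not_zero(): increment only while the current
 * value is non-zero, so a concurrent final put cannot be undone. */
static struct cred_demo *get_cred_demo(struct cred_demo *cred)
{
	unsigned int old = atomic_load(&cred->count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&cred->count, &old, old + 1))
			return cred;	/* reference taken */
	}
	return NULL;			/* object already dying; caller must redo the lookup */
}

int main(void)
{
	struct cred_demo live = { .name = "live" };
	struct cred_demo dead = { .name = "dead" };

	atomic_store(&live.count, 1);
	atomic_store(&dead.count, 0);

	printf("live -> %s\n", get_cred_demo(&live) ? "got ref" : "NULL");
	printf("dead -> %s\n", get_cred_demo(&dead) ? "got ref" : "NULL");
	return 0;
}
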
@@ -70,6 +70,7 @@ struct gss_cl_ctx {
refcount_t count;
enum rpc_gss_proc gc_proc;
u32 gc_seq;
u32 gc_seq_xmit;
spinlock_t gc_seq_lock;
struct gss_ctx *gc_gss_ctx;
struct xdr_netobj gc_wire_ctx;

@@ -34,6 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);

@@ -118,7 +118,8 @@ struct krb5_ctx {
u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
};

extern spinlock_t krb5_seq_lock;
extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);

/* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16)

@@ -140,8 +140,9 @@ struct rpc_task_setup {
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_ACTIVE 2
#define RPC_TASK_MSG_RECV 3
#define RPC_TASK_MSG_RECV_WAIT 4
#define RPC_TASK_NEED_XMIT 3
#define RPC_TASK_NEED_RECV 4
#define RPC_TASK_MSG_PIN_WAIT 5

#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
@@ -188,7 +189,6 @@ struct rpc_timer {
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
@@ -204,7 +204,6 @@ struct rpc_wait_queue {
* from a single cookie. The aim is to improve
* performance of NFS operations such as read/write.
*/
#define RPC_BATCH_COUNT 16
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)

/*
@@ -234,6 +233,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,
struct rpc_task *,
int);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,

@@ -84,7 +84,6 @@ struct svc_xprt {
struct sockaddr_storage xpt_remote; /* remote peer's address */
size_t xpt_remotelen; /* length of address */
char xpt_remotebuf[INET6_ADDRSTRLEN + 10];
struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
struct list_head xpt_users; /* callbacks on free */

struct net *xpt_net;

@@ -18,6 +18,7 @@
#include <asm/unaligned.h>
#include <linux/scatterlist.h>

struct bio_vec;
struct rpc_rqst;

/*
@@ -52,12 +53,14 @@ struct xdr_buf {
struct kvec head[1], /* RPC header + non-page data */
tail[1]; /* Appended after page data */

struct bio_vec *bvec;
struct page ** pages; /* Array of pages */
unsigned int page_base, /* Start of page data */
page_len, /* Length of page data */
flags; /* Flags for data disposition */
#define XDRBUF_READ 0x01 /* target of file read */
#define XDRBUF_WRITE 0x02 /* source of file write */
#define XDRBUF_SPARSE_PAGES 0x04 /* Page array is sparse */

unsigned int buflen, /* Total length of storage buffer */
len; /* Length of XDR encoded message */
@@ -69,6 +72,8 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
buf->bvec = NULL;
buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;
buf->len = 0;
@@ -115,6 +120,9 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
void xdr_terminate_string(struct xdr_buf *, const u32);
size_t xdr_buf_pagecount(struct xdr_buf *buf);
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
void xdr_free_bvec(struct xdr_buf *buf);

static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -177,10 +185,7 @@ struct xdr_skb_reader {

typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);

size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len);
extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
struct xdr_skb_reader *, xdr_skb_read_actor);

extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);

@ -82,7 +82,14 @@ struct rpc_rqst {
|
||||
struct page **rq_enc_pages; /* scratch pages for use by
|
||||
gss privacy code */
|
||||
void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
|
||||
struct list_head rq_list;
|
||||
|
||||
union {
|
||||
struct list_head rq_list; /* Slot allocation list */
|
||||
struct rb_node rq_recv; /* Receive queue */
|
||||
};
|
||||
|
||||
struct list_head rq_xmit; /* Send queue */
|
||||
struct list_head rq_xmit2; /* Send queue */
|
||||
|
||||
void *rq_buffer; /* Call XDR encode buffer */
|
||||
size_t rq_callsize;
|
||||
@ -103,6 +110,7 @@ struct rpc_rqst {
|
||||
/* A cookie used to track the
|
||||
state of the transport
|
||||
connection */
|
||||
atomic_t rq_pin;
|
||||
|
||||
/*
|
||||
* Partial send handling
|
||||
@ -133,7 +141,8 @@ struct rpc_xprt_ops {
|
||||
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
|
||||
int (*buf_alloc)(struct rpc_task *task);
|
||||
void (*buf_free)(struct rpc_task *task);
|
||||
int (*send_request)(struct rpc_task *task);
|
||||
void (*prepare_request)(struct rpc_rqst *req);
|
||||
int (*send_request)(struct rpc_rqst *req);
|
||||
void (*set_retrans_timeout)(struct rpc_task *task);
|
||||
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
|
||||
void (*release_request)(struct rpc_task *task);
|
||||
@ -234,9 +243,12 @@ struct rpc_xprt {
|
||||
*/
|
||||
spinlock_t transport_lock; /* lock transport info */
|
||||
spinlock_t reserve_lock; /* lock slot table */
|
||||
spinlock_t recv_lock; /* lock receive list */
|
||||
spinlock_t queue_lock; /* send/receive queue lock */
|
||||
u32 xid; /* Next XID value to use */
|
||||
struct rpc_task * snd_task; /* Task blocked in send */
|
||||
|
||||
struct list_head xmit_queue; /* Send queue */
|
||||
|
||||
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
|
||||
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
|
||||
struct svc_serv *bc_serv; /* The RPC service which will */
|
||||
@ -248,7 +260,8 @@ struct rpc_xprt {
|
||||
struct list_head bc_pa_list; /* List of preallocated
|
||||
* backchannel rpc_rqst's */
|
||||
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
|
||||
struct list_head recv;
|
||||
|
||||
struct rb_root recv_queue; /* Receive queue */
|
||||
|
||||
struct {
|
||||
unsigned long bind_count, /* total number of binds */
|
||||
@ -325,15 +338,18 @@ struct xprt_class {
|
||||
struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
|
||||
void xprt_connect(struct rpc_task *task);
|
||||
void xprt_reserve(struct rpc_task *task);
|
||||
void xprt_request_init(struct rpc_task *task);
|
||||
void xprt_retry_reserve(struct rpc_task *task);
|
||||
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
|
||||
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
|
||||
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
|
||||
void xprt_free_slot(struct rpc_xprt *xprt,
|
||||
struct rpc_rqst *req);
|
||||
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
|
||||
void xprt_request_prepare(struct rpc_rqst *req);
|
||||
bool xprt_prepare_transmit(struct rpc_task *task);
|
||||
void xprt_request_enqueue_transmit(struct rpc_task *task);
|
||||
void xprt_request_enqueue_receive(struct rpc_task *task);
|
||||
void xprt_request_wait_receive(struct rpc_task *task);
|
||||
bool xprt_request_need_retransmit(struct rpc_task *task);
|
||||
void xprt_transmit(struct rpc_task *task);
|
||||
void xprt_end_transmit(struct rpc_task *task);
|
||||
int xprt_adjust_timeout(struct rpc_rqst *req);
|
||||
@ -373,8 +389,8 @@ int xprt_load_transport(const char *);
|
||||
void xprt_set_retrans_timeout_def(struct rpc_task *task);
|
||||
void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
|
||||
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
|
||||
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
|
||||
void xprt_write_space(struct rpc_xprt *xprt);
|
||||
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
|
||||
bool xprt_write_space(struct rpc_xprt *xprt);
|
||||
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
|
||||
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
|
||||
void xprt_update_rtt(struct rpc_task *task);
|
||||
@ -382,6 +398,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied);
|
||||
void xprt_pin_rqst(struct rpc_rqst *req);
|
||||
void xprt_unpin_rqst(struct rpc_rqst *req);
|
||||
void xprt_release_rqst_cong(struct rpc_task *task);
|
||||
bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
|
||||
void xprt_disconnect_done(struct rpc_xprt *xprt);
|
||||
void xprt_force_disconnect(struct rpc_xprt *xprt);
|
||||
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
|
||||
@ -400,6 +417,8 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
|
||||
#define XPRT_BINDING (5)
|
||||
#define XPRT_CLOSING (6)
|
||||
#define XPRT_CONGESTED (9)
|
||||
#define XPRT_CWND_WAIT (10)
|
||||
#define XPRT_WRITE_SPACE (11)
|
||||
|
||||
static inline void xprt_set_connected(struct rpc_xprt *xprt)
|
||||
{
|
||||
|
@ -30,15 +30,25 @@ struct sock_xprt {
|
||||
/*
|
||||
* State of TCP reply receive
|
||||
*/
|
||||
__be32 tcp_fraghdr,
|
||||
tcp_xid,
|
||||
tcp_calldir;
|
||||
struct {
|
||||
struct {
|
||||
__be32 fraghdr,
|
||||
xid,
|
||||
calldir;
|
||||
} __attribute__((packed));
|
||||
|
||||
u32 tcp_offset,
|
||||
tcp_reclen;
|
||||
u32 offset,
|
||||
len;
|
||||
|
||||
unsigned long tcp_copied,
|
||||
tcp_flags;
|
||||
unsigned long copied;
|
||||
} recv;
|
||||
|
||||
/*
|
||||
* State of TCP transmit queue
|
||||
*/
|
||||
struct {
|
||||
u32 offset;
|
||||
} xmit;
|
||||
|
||||
/*
|
||||
* Connection of transports
|
||||
@ -67,21 +77,9 @@ struct sock_xprt {
|
||||
void (*old_error_report)(struct sock *);
|
||||
};
|
||||
|
||||
/*
|
||||
* TCP receive state flags
|
||||
*/
|
||||
#define TCP_RCV_LAST_FRAG (1UL << 0)
|
||||
#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
|
||||
#define TCP_RCV_COPY_XID (1UL << 2)
|
||||
#define TCP_RCV_COPY_DATA (1UL << 3)
|
||||
#define TCP_RCV_READ_CALLDIR (1UL << 4)
|
||||
#define TCP_RCV_COPY_CALLDIR (1UL << 5)
|
||||
|
||||
/*
|
||||
* TCP RPC flags
|
||||
*/
|
||||
#define TCP_RPC_REPLY (1UL << 6)
|
||||
|
||||
#define XPRT_SOCK_CONNECTING 1U
|
||||
#define XPRT_SOCK_DATA_READY (2)
|
||||
#define XPRT_SOCK_UPD_TIMEOUT (3)
|
||||
|
@ -263,7 +263,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
|
||||
);
|
||||
|
||||
#define DEFINE_MR_EVENT(name) \
|
||||
DEFINE_EVENT(xprtrdma_mr, name, \
|
||||
DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
|
||||
TP_PROTO( \
|
||||
const struct rpcrdma_mr *mr \
|
||||
), \
|
||||
@ -306,7 +306,7 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event,
|
||||
** Connection events
|
||||
**/
|
||||
|
||||
TRACE_EVENT(xprtrdma_conn_upcall,
|
||||
TRACE_EVENT(xprtrdma_cm_event,
|
||||
TP_PROTO(
|
||||
const struct rpcrdma_xprt *r_xprt,
|
||||
struct rdma_cm_event *event
|
||||
@ -377,7 +377,7 @@ DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
|
||||
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
|
||||
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
|
||||
|
||||
TRACE_EVENT(xprtrdma_qp_error,
|
||||
TRACE_EVENT(xprtrdma_qp_event,
|
||||
TP_PROTO(
|
||||
const struct rpcrdma_xprt *r_xprt,
|
||||
const struct ib_event *event
|
||||
@ -509,7 +509,7 @@ TRACE_EVENT(xprtrdma_post_send,
|
||||
TP_STRUCT__entry(
|
||||
__field(const void *, req)
|
||||
__field(int, num_sge)
|
||||
__field(bool, signaled)
|
||||
__field(int, signaled)
|
||||
__field(int, status)
|
||||
),
|
||||
|
||||
@ -651,11 +651,11 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
|
||||
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
|
||||
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
|
||||
|
||||
DEFINE_MR_EVENT(xprtrdma_localinv);
|
||||
DEFINE_MR_EVENT(xprtrdma_dma_map);
|
||||
DEFINE_MR_EVENT(xprtrdma_dma_unmap);
|
||||
DEFINE_MR_EVENT(xprtrdma_remoteinv);
|
||||
DEFINE_MR_EVENT(xprtrdma_recover_mr);
|
||||
DEFINE_MR_EVENT(localinv);
|
||||
DEFINE_MR_EVENT(map);
|
||||
DEFINE_MR_EVENT(unmap);
|
||||
DEFINE_MR_EVENT(remoteinv);
|
||||
DEFINE_MR_EVENT(recycle);
|
||||
|
||||
/**
|
||||
** Reply events
|
||||
|
@ -470,14 +470,14 @@ TRACE_EVENT(xprt_ping,
|
||||
__get_str(addr), __get_str(port), __entry->status)
|
||||
);
|
||||
|
||||
TRACE_EVENT(xs_tcp_data_ready,
|
||||
TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total),
|
||||
TRACE_EVENT(xs_stream_read_data,
|
||||
TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
|
||||
|
||||
TP_ARGS(xprt, err, total),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(int, err)
|
||||
__field(unsigned int, total)
|
||||
__field(ssize_t, err)
|
||||
__field(size_t, total)
|
||||
__string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] :
|
||||
"(null)")
|
||||
__string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] :
|
||||
@ -493,21 +493,11 @@ TRACE_EVENT(xs_tcp_data_ready,
|
||||
xprt->address_strings[RPC_DISPLAY_PORT] : "(null)");
|
||||
),
|
||||
|
||||
TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr),
|
||||
TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr),
|
||||
__get_str(port), __entry->err, __entry->total)
|
||||
);
|
||||
|
||||
#define rpc_show_sock_xprt_flags(flags) \
|
||||
__print_flags(flags, "|", \
|
||||
{ TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \
|
||||
{ TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \
|
||||
{ TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \
|
||||
{ TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \
|
||||
{ TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \
|
||||
{ TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \
|
||||
{ TCP_RPC_REPLY, "TCP_RPC_REPLY" })
|
||||
|
||||
TRACE_EVENT(xs_tcp_data_recv,
|
||||
TRACE_EVENT(xs_stream_read_request,
|
||||
TP_PROTO(struct sock_xprt *xs),
|
||||
|
||||
TP_ARGS(xs),
|
||||
@ -516,25 +506,22 @@ TRACE_EVENT(xs_tcp_data_recv,
|
||||
__string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR])
|
||||
__string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT])
|
||||
__field(u32, xid)
|
||||
__field(unsigned long, flags)
|
||||
__field(unsigned long, copied)
|
||||
__field(unsigned int, reclen)
|
||||
__field(unsigned long, offset)
|
||||
__field(unsigned int, offset)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]);
|
||||
__assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]);
|
||||
__entry->xid = be32_to_cpu(xs->tcp_xid);
|
||||
__entry->flags = xs->tcp_flags;
|
||||
__entry->copied = xs->tcp_copied;
|
||||
__entry->reclen = xs->tcp_reclen;
|
||||
__entry->offset = xs->tcp_offset;
|
||||
__entry->xid = be32_to_cpu(xs->recv.xid);
|
||||
__entry->copied = xs->recv.copied;
|
||||
__entry->reclen = xs->recv.len;
|
||||
__entry->offset = xs->recv.offset;
|
||||
),
|
||||
|
||||
TP_printk("peer=[%s]:%s xid=0x%08x flags=%s copied=%lu reclen=%u offset=%lu",
|
||||
TP_printk("peer=[%s]:%s xid=0x%08x copied=%lu reclen=%u offset=%u",
|
||||
__get_str(addr), __get_str(port), __entry->xid,
|
||||
rpc_show_sock_xprt_flags(__entry->flags),
|
||||
__entry->copied, __entry->reclen, __entry->offset)
|
||||
);
|
||||
|
||||
|
@ -30,10 +30,9 @@ struct rpc_cred_cache {
|
||||
|
||||
static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS;
|
||||
|
||||
static DEFINE_SPINLOCK(rpc_authflavor_lock);
|
||||
static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
|
||||
&authnull_ops, /* AUTH_NULL */
|
||||
&authunix_ops, /* AUTH_UNIX */
|
||||
static const struct rpc_authops __rcu *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
|
||||
[RPC_AUTH_NULL] = (const struct rpc_authops __force __rcu *)&authnull_ops,
|
||||
[RPC_AUTH_UNIX] = (const struct rpc_authops __force __rcu *)&authunix_ops,
|
||||
NULL, /* others can be loadable modules */
|
||||
};
|
||||
|
||||
@ -93,39 +92,65 @@ pseudoflavor_to_flavor(u32 flavor) {
|
||||
int
|
||||
rpcauth_register(const struct rpc_authops *ops)
|
||||
{
|
||||
const struct rpc_authops *old;
|
||||
rpc_authflavor_t flavor;
|
||||
int ret = -EPERM;
|
||||
|
||||
if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR)
|
||||
return -EINVAL;
|
||||
spin_lock(&rpc_authflavor_lock);
|
||||
if (auth_flavors[flavor] == NULL) {
|
||||
auth_flavors[flavor] = ops;
|
||||
ret = 0;
|
||||
}
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
return ret;
|
||||
old = cmpxchg((const struct rpc_authops ** __force)&auth_flavors[flavor], NULL, ops);
|
||||
if (old == NULL || old == ops)
|
||||
return 0;
|
||||
return -EPERM;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_register);
|
||||
|
||||
int
|
||||
rpcauth_unregister(const struct rpc_authops *ops)
|
||||
{
|
||||
const struct rpc_authops *old;
|
||||
rpc_authflavor_t flavor;
|
||||
int ret = -EPERM;
|
||||
|
||||
if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR)
|
||||
return -EINVAL;
|
||||
spin_lock(&rpc_authflavor_lock);
|
||||
if (auth_flavors[flavor] == ops) {
|
||||
auth_flavors[flavor] = NULL;
|
||||
ret = 0;
|
||||
}
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
return ret;
|
||||
|
||||
old = cmpxchg((const struct rpc_authops ** __force)&auth_flavors[flavor], ops, NULL);
|
||||
if (old == ops || old == NULL)
|
||||
return 0;
|
||||
return -EPERM;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_unregister);
|
||||
|
||||
static const struct rpc_authops *
|
||||
rpcauth_get_authops(rpc_authflavor_t flavor)
|
||||
{
|
||||
const struct rpc_authops *ops;
|
||||
|
||||
if (flavor >= RPC_AUTH_MAXFLAVOR)
|
||||
return NULL;
|
||||
|
||||
rcu_read_lock();
|
||||
ops = rcu_dereference(auth_flavors[flavor]);
|
||||
if (ops == NULL) {
|
||||
rcu_read_unlock();
|
||||
request_module("rpc-auth-%u", flavor);
|
||||
rcu_read_lock();
|
||||
ops = rcu_dereference(auth_flavors[flavor]);
|
||||
if (ops == NULL)
|
||||
goto out;
|
||||
}
|
||||
if (!try_module_get(ops->owner))
|
||||
ops = NULL;
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return ops;
|
||||
}
|
||||
|
||||
static void
|
||||
rpcauth_put_authops(const struct rpc_authops *ops)
|
||||
{
|
||||
module_put(ops->owner);
|
||||
}
|
||||
|
||||
/**
|
||||
* rpcauth_get_pseudoflavor - check if security flavor is supported
|
||||
* @flavor: a security flavor
|
||||
@ -138,25 +163,16 @@ EXPORT_SYMBOL_GPL(rpcauth_unregister);
|
||||
rpc_authflavor_t
|
||||
rpcauth_get_pseudoflavor(rpc_authflavor_t flavor, struct rpcsec_gss_info *info)
|
||||
{
|
||||
const struct rpc_authops *ops;
|
||||
const struct rpc_authops *ops = rpcauth_get_authops(flavor);
|
||||
rpc_authflavor_t pseudoflavor;
|
||||
|
||||
ops = auth_flavors[flavor];
|
||||
if (ops == NULL)
|
||||
request_module("rpc-auth-%u", flavor);
|
||||
spin_lock(&rpc_authflavor_lock);
|
||||
ops = auth_flavors[flavor];
|
||||
if (ops == NULL || !try_module_get(ops->owner)) {
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
if (!ops)
|
||||
return RPC_AUTH_MAXFLAVOR;
|
||||
}
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
|
||||
pseudoflavor = flavor;
|
||||
if (ops->info2flavor != NULL)
|
||||
pseudoflavor = ops->info2flavor(info);
|
||||
|
||||
module_put(ops->owner);
|
||||
rpcauth_put_authops(ops);
|
||||
return pseudoflavor;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_get_pseudoflavor);
|
||||
@ -176,25 +192,15 @@ rpcauth_get_gssinfo(rpc_authflavor_t pseudoflavor, struct rpcsec_gss_info *info)
|
||||
const struct rpc_authops *ops;
|
||||
int result;
|
||||
|
||||
if (flavor >= RPC_AUTH_MAXFLAVOR)
|
||||
return -EINVAL;
|
||||
|
||||
ops = auth_flavors[flavor];
|
||||
ops = rpcauth_get_authops(flavor);
|
||||
if (ops == NULL)
|
||||
request_module("rpc-auth-%u", flavor);
|
||||
spin_lock(&rpc_authflavor_lock);
|
||||
ops = auth_flavors[flavor];
|
||||
if (ops == NULL || !try_module_get(ops->owner)) {
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
return -ENOENT;
|
||||
}
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
|
||||
result = -ENOENT;
|
||||
if (ops->flavor2info != NULL)
|
||||
result = ops->flavor2info(pseudoflavor, info);
|
||||
|
||||
module_put(ops->owner);
|
||||
rpcauth_put_authops(ops);
|
||||
return result;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rpcauth_get_gssinfo);
|
||||
@ -212,15 +218,13 @@ EXPORT_SYMBOL_GPL(rpcauth_get_gssinfo);
|
||||
int
|
||||
rpcauth_list_flavors(rpc_authflavor_t *array, int size)
|
||||
{
|
||||
rpc_authflavor_t flavor;
|
||||
int result = 0;
|
||||
const struct rpc_authops *ops;
|
||||
rpc_authflavor_t flavor, pseudos[4];
|
||||
int i, len, result = 0;
|
||||
|
||||
spin_lock(&rpc_authflavor_lock);
|
||||
rcu_read_lock();
|
||||
for (flavor = 0; flavor < RPC_AUTH_MAXFLAVOR; flavor++) {
|
||||
const struct rpc_authops *ops = auth_flavors[flavor];
|
||||
rpc_authflavor_t pseudos[4];
|
||||
int i, len;
|
||||
|
||||
ops = rcu_dereference(auth_flavors[flavor]);
|
||||
if (result >= size) {
|
||||
result = -ENOMEM;
|
||||
break;
|
||||
@ -245,7 +249,7 @@ rpcauth_list_flavors(rpc_authflavor_t *array, int size)
|
||||
array[result++] = pseudos[i];
|
||||
}
|
||||
}
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
dprintk("RPC: %s returns %d\n", __func__, result);
|
||||
return result;
|
||||
@ -255,25 +259,17 @@ EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
|
||||
struct rpc_auth *
|
||||
rpcauth_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
|
||||
{
|
||||
struct rpc_auth *auth;
|
||||
struct rpc_auth *auth = ERR_PTR(-EINVAL);
|
||||
const struct rpc_authops *ops;
|
||||
u32 flavor = pseudoflavor_to_flavor(args->pseudoflavor);
|
||||
u32 flavor = pseudoflavor_to_flavor(args->pseudoflavor);
|
||||
|
||||
auth = ERR_PTR(-EINVAL);
|
||||
if (flavor >= RPC_AUTH_MAXFLAVOR)
|
||||
ops = rpcauth_get_authops(flavor);
|
||||
if (ops == NULL)
|
||||
goto out;
|
||||
|
||||
if ((ops = auth_flavors[flavor]) == NULL)
|
||||
request_module("rpc-auth-%u", flavor);
|
||||
spin_lock(&rpc_authflavor_lock);
|
||||
ops = auth_flavors[flavor];
|
||||
if (ops == NULL || !try_module_get(ops->owner)) {
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
goto out;
|
||||
}
|
||||
spin_unlock(&rpc_authflavor_lock);
|
||||
auth = ops->create(args, clnt);
|
||||
module_put(ops->owner);
|
||||
|
||||
rpcauth_put_authops(ops);
|
||||
if (IS_ERR(auth))
|
||||
return auth;
|
||||
if (clnt->cl_auth)
|
||||
@ -288,32 +284,37 @@ EXPORT_SYMBOL_GPL(rpcauth_create);
|
||||
void
|
||||
rpcauth_release(struct rpc_auth *auth)
|
||||
{
|
||||
if (!atomic_dec_and_test(&auth->au_count))
|
||||
if (!refcount_dec_and_test(&auth->au_count))
|
||||
return;
|
||||
auth->au_ops->destroy(auth);
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(rpc_credcache_lock);
|
||||
|
||||
static void
|
||||
/*
|
||||
* On success, the caller is responsible for freeing the reference
|
||||
* held by the hashtable
|
||||
*/
|
||||
static bool
|
||||
rpcauth_unhash_cred_locked(struct rpc_cred *cred)
|
||||
{
|
||||
if (!test_and_clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags))
|
||||
return false;
|
||||
hlist_del_rcu(&cred->cr_hash);
|
||||
smp_mb__before_atomic();
|
||||
clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
|
||||
return true;
|
||||
}
|
||||
|
||||
static int
|
||||
static bool
|
||||
rpcauth_unhash_cred(struct rpc_cred *cred)
|
||||
{
|
||||
spinlock_t *cache_lock;
|
||||
int ret;
|
||||
bool ret;
|
||||
|
||||
if (!test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags))
|
||||
return false;
|
||||
cache_lock = &cred->cr_auth->au_credcache->lock;
|
||||
spin_lock(cache_lock);
|
||||
ret = atomic_read(&cred->cr_count) == 0;
|
||||
if (ret)
|
||||
rpcauth_unhash_cred_locked(cred);
|
||||
ret = rpcauth_unhash_cred_locked(cred);
|
||||
spin_unlock(cache_lock);
|
||||
return ret;
|
||||
}
|
||||
@ -392,6 +393,44 @@ void rpcauth_destroy_credlist(struct list_head *head)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
rpcauth_lru_add_locked(struct rpc_cred *cred)
|
||||
{
|
||||
if (!list_empty(&cred->cr_lru))
|
||||
return;
|
||||
number_cred_unused++;
|
||||
list_add_tail(&cred->cr_lru, &cred_unused);
|
||||
}
|
||||
|
||||
static void
|
||||
rpcauth_lru_add(struct rpc_cred *cred)
|
||||
{
|
||||
if (!list_empty(&cred->cr_lru))
|
||||
return;
|
||||
spin_lock(&rpc_credcache_lock);
|
||||
rpcauth_lru_add_locked(cred);
|
||||
spin_unlock(&rpc_credcache_lock);
|
||||
}
|
||||
|
||||
static void
|
||||
rpcauth_lru_remove_locked(struct rpc_cred *cred)
|
||||
{
|
||||
if (list_empty(&cred->cr_lru))
|
||||
return;
|
||||
number_cred_unused--;
|
||||
list_del_init(&cred->cr_lru);
|
||||
}
|
||||
|
||||
static void
|
||||
rpcauth_lru_remove(struct rpc_cred *cred)
|
||||
{
|
||||
if (list_empty(&cred->cr_lru))
|
||||
return;
|
||||
spin_lock(&rpc_credcache_lock);
|
||||
rpcauth_lru_remove_locked(cred);
|
||||
spin_unlock(&rpc_credcache_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear the RPC credential cache, and delete those credentials
|
||||
* that are not referenced.
|
||||
@ -411,13 +450,10 @@ rpcauth_clear_credcache(struct rpc_cred_cache *cache)
|
||||
head = &cache->hashtable[i];
|
||||
while (!hlist_empty(head)) {
|
||||
cred = hlist_entry(head->first, struct rpc_cred, cr_hash);
|
||||
get_rpccred(cred);
|
||||
if (!list_empty(&cred->cr_lru)) {
|
||||
list_del(&cred->cr_lru);
|
||||
number_cred_unused--;
|
||||
}
|
||||
list_add_tail(&cred->cr_lru, &free);
|
||||
rpcauth_unhash_cred_locked(cred);
|
||||
/* Note: We now hold a reference to cred */
|
||||
rpcauth_lru_remove_locked(cred);
|
||||
list_add_tail(&cred->cr_lru, &free);
|
||||
}
|
||||
}
|
||||
spin_unlock(&cache->lock);
|
||||
@ -451,7 +487,6 @@ EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache);
|
||||
static long
|
||||
rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
|
||||
{
|
||||
spinlock_t *cache_lock;
|
||||
struct rpc_cred *cred, *next;
|
||||
unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM;
|
||||
long freed = 0;
|
||||
@ -460,32 +495,24 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
|
||||
|
||||
if (nr_to_scan-- == 0)
|
||||
break;
|
||||
if (refcount_read(&cred->cr_count) > 1) {
|
||||
rpcauth_lru_remove_locked(cred);
|
||||
continue;
|
||||
}
|
||||
/*
|
||||
* Enforce a 60 second garbage collection moratorium
|
||||
* Note that the cred_unused list must be time-ordered.
|
||||
*/
|
||||
if (time_in_range(cred->cr_expire, expired, jiffies) &&
|
||||
test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
|
||||
freed = SHRINK_STOP;
|
||||
break;
|
||||
}
|
||||
|
||||
list_del_init(&cred->cr_lru);
|
||||
number_cred_unused--;
|
||||
freed++;
|
||||
if (atomic_read(&cred->cr_count) != 0)
|
||||
if (!time_in_range(cred->cr_expire, expired, jiffies))
|
||||
continue;
|
||||
if (!rpcauth_unhash_cred(cred))
|
||||
continue;
|
||||
|
||||
cache_lock = &cred->cr_auth->au_credcache->lock;
|
||||
spin_lock(cache_lock);
|
||||
if (atomic_read(&cred->cr_count) == 0) {
|
||||
get_rpccred(cred);
|
||||
list_add_tail(&cred->cr_lru, free);
|
||||
rpcauth_unhash_cred_locked(cred);
|
||||
}
|
||||
spin_unlock(cache_lock);
|
||||
rpcauth_lru_remove_locked(cred);
|
||||
freed++;
|
||||
list_add_tail(&cred->cr_lru, free);
|
||||
}
|
||||
return freed;
|
||||
return freed ? freed : SHRINK_STOP;
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
@ -561,19 +588,15 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
|
||||
if (!entry->cr_ops->crmatch(acred, entry, flags))
|
||||
continue;
|
||||
if (flags & RPCAUTH_LOOKUP_RCU) {
|
||||
if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) &&
|
||||
!test_bit(RPCAUTH_CRED_NEW, &entry->cr_flags))
|
||||
cred = entry;
|
||||
if (test_bit(RPCAUTH_CRED_NEW, &entry->cr_flags) ||
|
||||
refcount_read(&entry->cr_count) == 0)
|
||||
continue;
|
||||
cred = entry;
|
||||
break;
|
||||
}
|
||||
spin_lock(&cache->lock);
|
||||
if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) {
|
||||
spin_unlock(&cache->lock);
|
||||
continue;
|
||||
}
|
||||
cred = get_rpccred(entry);
|
||||
spin_unlock(&cache->lock);
|
||||
break;
|
||||
if (cred)
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
@ -594,11 +617,13 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
|
||||
if (!entry->cr_ops->crmatch(acred, entry, flags))
|
||||
continue;
|
||||
cred = get_rpccred(entry);
|
||||
break;
|
||||
if (cred)
|
||||
break;
|
||||
}
|
||||
if (cred == NULL) {
|
||||
cred = new;
|
||||
set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
|
||||
refcount_inc(&cred->cr_count);
|
||||
hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]);
|
||||
} else
|
||||
list_add_tail(&new->cr_lru, &free);
|
||||
@ -645,7 +670,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
|
||||
{
|
||||
INIT_HLIST_NODE(&cred->cr_hash);
|
||||
INIT_LIST_HEAD(&cred->cr_lru);
|
||||
atomic_set(&cred->cr_count, 1);
|
||||
refcount_set(&cred->cr_count, 1);
|
||||
cred->cr_auth = auth;
|
||||
cred->cr_ops = ops;
|
||||
cred->cr_expire = jiffies;
|
||||
@ -713,36 +738,29 @@ put_rpccred(struct rpc_cred *cred)
|
||||
{
|
||||
if (cred == NULL)
|
||||
return;
|
||||
/* Fast path for unhashed credentials */
|
||||
if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
|
||||
if (atomic_dec_and_test(&cred->cr_count))
|
||||
cred->cr_ops->crdestroy(cred);
|
||||
return;
|
||||
rcu_read_lock();
|
||||
if (refcount_dec_and_test(&cred->cr_count))
|
||||
goto destroy;
|
||||
if (refcount_read(&cred->cr_count) != 1 ||
|
||||
!test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags))
|
||||
goto out;
|
||||
if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
|
||||
cred->cr_expire = jiffies;
|
||||
rpcauth_lru_add(cred);
|
||||
/* Race breaker */
|
||||
if (unlikely(!test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags)))
|
||||
rpcauth_lru_remove(cred);
|
||||
} else if (rpcauth_unhash_cred(cred)) {
|
||||
rpcauth_lru_remove(cred);
|
||||
if (refcount_dec_and_test(&cred->cr_count))
|
||||
goto destroy;
|
||||
}
|
||||
|
||||
if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
|
||||
return;
|
||||
if (!list_empty(&cred->cr_lru)) {
|
||||
number_cred_unused--;
|
||||
list_del_init(&cred->cr_lru);
|
||||
}
|
||||
if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
|
||||
if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
|
||||
cred->cr_expire = jiffies;
|
||||
list_add_tail(&cred->cr_lru, &cred_unused);
|
||||
number_cred_unused++;
|
||||
goto out_nodestroy;
|
||||
}
|
||||
if (!rpcauth_unhash_cred(cred)) {
|
||||
/* We were hashed and someone looked us up... */
|
||||
goto out_nodestroy;
|
||||
}
|
||||
}
|
||||
spin_unlock(&rpc_credcache_lock);
|
||||
cred->cr_ops->crdestroy(cred);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
out_nodestroy:
|
||||
spin_unlock(&rpc_credcache_lock);
|
||||
destroy:
|
||||
rcu_read_unlock();
|
||||
cred->cr_ops->crdestroy(cred);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(put_rpccred);
|
||||
|
||||
@ -817,6 +835,16 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
|
||||
return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
|
||||
}
|
||||
|
||||
bool
|
||||
rpcauth_xmit_need_reencode(struct rpc_task *task)
|
||||
{
|
||||
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
||||
|
||||
if (!cred || !cred->cr_ops->crneed_reencode)
|
||||
return false;
|
||||
return cred->cr_ops->crneed_reencode(task);
|
||||
}
|
||||
|
||||
int
|
||||
rpcauth_refreshcred(struct rpc_task *task)
|
||||
{
|
||||
|
@@ -274,7 +274,7 @@ static const struct rpc_authops generic_auth_ops = {

static struct rpc_auth generic_auth = {
.au_ops = &generic_auth_ops,
.au_count = ATOMIC_INIT(0),
.au_count = REFCOUNT_INIT(1),
};

static bool generic_key_to_expire(struct rpc_cred *cred)

@ -1058,7 +1058,7 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
|
||||
auth->au_flavor = flavor;
|
||||
if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
|
||||
auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
|
||||
atomic_set(&auth->au_count, 1);
|
||||
refcount_set(&auth->au_count, 1);
|
||||
kref_init(&gss_auth->kref);
|
||||
|
||||
err = rpcauth_init_credcache(auth);
|
||||
@ -1187,7 +1187,7 @@ gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
|
||||
if (strcmp(gss_auth->target_name, args->target_name))
|
||||
continue;
|
||||
}
|
||||
if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count))
|
||||
if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
|
||||
continue;
|
||||
goto out;
|
||||
}
|
||||
@ -1984,6 +1984,46 @@ gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
|
||||
return decode(rqstp, &xdr, obj);
|
||||
}
|
||||
|
||||
static bool
|
||||
gss_seq_is_newer(u32 new, u32 old)
|
||||
{
|
||||
return (s32)(new - old) > 0;
|
||||
}
|
||||
|
||||
static bool
|
||||
gss_xmit_need_reencode(struct rpc_task *task)
|
||||
{
|
||||
struct rpc_rqst *req = task->tk_rqstp;
|
||||
struct rpc_cred *cred = req->rq_cred;
|
||||
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
||||
u32 win, seq_xmit;
|
||||
bool ret = true;
|
||||
|
||||
if (!ctx)
|
||||
return true;
|
||||
|
||||
if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
|
||||
goto out;
|
||||
|
||||
seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
|
||||
while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
|
||||
u32 tmp = seq_xmit;
|
||||
|
||||
seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
|
||||
if (seq_xmit == tmp) {
|
||||
ret = false;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
win = ctx->gc_win;
|
||||
if (win > 0)
|
||||
ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
|
||||
out:
|
||||
gss_put_ctx(ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
gss_unwrap_resp(struct rpc_task *task,
|
||||
kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
|
||||
@ -2052,6 +2092,7 @@ static const struct rpc_credops gss_credops = {
|
||||
.crunwrap_resp = gss_unwrap_resp,
|
||||
.crkey_timeout = gss_key_timeout,
|
||||
.crstringify_acceptor = gss_stringify_acceptor,
|
||||
.crneed_reencode = gss_xmit_need_reencode,
|
||||
};
|
||||
|
||||
static const struct rpc_credops gss_nullops = {
|
||||
|
@@ -63,13 +63,12 @@
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/crypto.h>
#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

DEFINE_SPINLOCK(krb5_seq_lock);

static void *
setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
{
@@ -124,6 +123,30 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
return krb5_hdr;
}

u32
gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
{
u32 old, seq_send = READ_ONCE(ctx->seq_send);

do {
old = seq_send;
seq_send = cmpxchg(&ctx->seq_send, old, old + 1);
} while (old != seq_send);
return seq_send;
}

u64
gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
{
u64 old, seq_send = READ_ONCE(ctx->seq_send);

do {
old = seq_send;
seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1);
} while (old != seq_send);
return seq_send;
}

static u32
gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token)
@@ -154,9 +177,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,

memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

spin_lock(&krb5_seq_lock);
seq_send = ctx->seq_send++;
spin_unlock(&krb5_seq_lock);
seq_send = gss_seq_send_fetch_and_inc(ctx);

if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
@@ -174,7 +195,6 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
.data = cksumdata};
void *krb5_hdr;
s32 now;
u64 seq_send;
u8 *cksumkey;
unsigned int cksum_usage;
__be64 seq_send_be64;
@@ -185,11 +205,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,

/* Set up the sequence number. Now 64-bits in clear
* text and w/o direction indicator */
spin_lock(&krb5_seq_lock);
seq_send = ctx->seq_send64++;
spin_unlock(&krb5_seq_lock);

seq_send_be64 = cpu_to_be64(seq_send);
seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);

if (ctx->initiate) {

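gss_seq_send_fetch_and_inc() above replaces the global krb5_seq_lock with a compare-and-swap loop, so concurrent GSS encodes can draw sequence numbers without serialising on a spinlock. A small userspace model of that fetch-and-increment loop is sketched below, using GCC's __atomic builtins to stand in for the kernel's cmpxchg(); the context structure is a stand-in, not the real krb5_ctx.

#include <stdint.h>
#include <stdio.h>

struct seq_ctx_demo {
	uint32_t seq_send;
};

/* Model of gss_seq_send_fetch_and_inc(): retry a compare-and-swap of
 * (old -> old + 1) until no other thread raced with us, then return the
 * value this caller personally claimed. */
static uint32_t seq_fetch_and_inc(struct seq_ctx_demo *ctx)
{
	uint32_t old = __atomic_load_n(&ctx->seq_send, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(&ctx->seq_send, &old, old + 1,
					    0 /* strong */, __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED))
		;	/* 'old' was refreshed by the failed CAS; try again */
	return old;
}

int main(void)
{
	struct seq_ctx_demo ctx = { .seq_send = 100 };

	/* Each call hands out a unique sequence number, lock-free. */
	printf("%u\n", seq_fetch_and_inc(&ctx));
	printf("%u\n", seq_fetch_and_inc(&ctx));
	printf("%u\n", seq_fetch_and_inc(&ctx));
	return 0;
}
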
@ -228,9 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
|
||||
|
||||
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
|
||||
|
||||
spin_lock(&krb5_seq_lock);
|
||||
seq_send = kctx->seq_send++;
|
||||
spin_unlock(&krb5_seq_lock);
|
||||
seq_send = gss_seq_send_fetch_and_inc(kctx);
|
||||
|
||||
/* XXX would probably be more efficient to compute checksum
|
||||
* and encrypt at the same time: */
|
||||
@ -477,9 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
|
||||
*be16ptr++ = 0;
|
||||
|
||||
be64ptr = (__be64 *)be16ptr;
|
||||
spin_lock(&krb5_seq_lock);
|
||||
*be64ptr = cpu_to_be64(kctx->seq_send64++);
|
||||
spin_unlock(&krb5_seq_lock);
|
||||
*be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
|
||||
|
||||
err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
|
||||
if (err)
|
||||
|
@ -117,7 +117,7 @@ int gss_mech_register(struct gss_api_mech *gm)
|
||||
if (status)
|
||||
return status;
|
||||
spin_lock(®istered_mechs_lock);
|
||||
list_add(&gm->gm_list, ®istered_mechs);
|
||||
list_add_rcu(&gm->gm_list, ®istered_mechs);
|
||||
spin_unlock(®istered_mechs_lock);
|
||||
dprintk("RPC: registered gss mechanism %s\n", gm->gm_name);
|
||||
return 0;
|
||||
@ -132,7 +132,7 @@ EXPORT_SYMBOL_GPL(gss_mech_register);
|
||||
void gss_mech_unregister(struct gss_api_mech *gm)
|
||||
{
|
||||
spin_lock(®istered_mechs_lock);
|
||||
list_del(&gm->gm_list);
|
||||
list_del_rcu(&gm->gm_list);
|
||||
spin_unlock(®istered_mechs_lock);
|
||||
dprintk("RPC: unregistered gss mechanism %s\n", gm->gm_name);
|
||||
gss_mech_free(gm);
|
||||
@ -151,15 +151,15 @@ _gss_mech_get_by_name(const char *name)
|
||||
{
|
||||
struct gss_api_mech *pos, *gm = NULL;
|
||||
|
||||
spin_lock(®istered_mechs_lock);
|
||||
list_for_each_entry(pos, ®istered_mechs, gm_list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(pos, ®istered_mechs, gm_list) {
|
||||
if (0 == strcmp(name, pos->gm_name)) {
|
||||
if (try_module_get(pos->gm_owner))
|
||||
gm = pos;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(®istered_mechs_lock);
|
||||
rcu_read_unlock();
|
||||
return gm;
|
||||
|
||||
}
|
||||
@ -186,8 +186,8 @@ struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj)
|
||||
dprintk("RPC: %s(%s)\n", __func__, buf);
|
||||
request_module("rpc-auth-gss-%s", buf);
|
||||
|
||||
spin_lock(®istered_mechs_lock);
|
||||
list_for_each_entry(pos, ®istered_mechs, gm_list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(pos, ®istered_mechs, gm_list) {
|
||||
if (obj->len == pos->gm_oid.len) {
|
||||
if (0 == memcmp(obj->data, pos->gm_oid.data, obj->len)) {
|
||||
if (try_module_get(pos->gm_owner))
|
||||
@ -196,7 +196,7 @@ struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj)
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock(®istered_mechs_lock);
|
||||
rcu_read_unlock();
|
||||
return gm;
|
||||
}
|
||||
|
||||
@ -216,15 +216,15 @@ static struct gss_api_mech *_gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
|
||||
{
|
||||
struct gss_api_mech *gm = NULL, *pos;
|
||||
|
||||
spin_lock(®istered_mechs_lock);
|
||||
list_for_each_entry(pos, ®istered_mechs, gm_list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(pos, ®istered_mechs, gm_list) {
|
||||
if (!mech_supports_pseudoflavor(pos, pseudoflavor))
|
||||
continue;
|
||||
if (try_module_get(pos->gm_owner))
|
||||
gm = pos;
|
||||
break;
|
||||
}
|
||||
spin_unlock(®istered_mechs_lock);
|
||||
rcu_read_unlock();
|
||||
return gm;
|
||||
}
|
||||
|
||||
@ -257,8 +257,8 @@ int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
|
||||
struct gss_api_mech *pos = NULL;
|
||||
int j, i = 0;
|
||||
|
||||
spin_lock(®istered_mechs_lock);
|
||||
list_for_each_entry(pos, ®istered_mechs, gm_list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(pos, ®istered_mechs, gm_list) {
|
||||
for (j = 0; j < pos->gm_pf_num; j++) {
|
||||
if (i >= size) {
|
||||
spin_unlock(®istered_mechs_lock);
|
||||
@ -267,7 +267,7 @@ int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
|
||||
array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
|
||||
}
|
||||
}
|
||||
spin_unlock(®istered_mechs_lock);
|
||||
rcu_read_unlock();
|
||||
return i;
|
||||
}
|
||||
|
||||
|
@ -784,6 +784,7 @@ void gssx_enc_accept_sec_context(struct rpc_rqst *req,
|
||||
xdr_inline_pages(&req->rq_rcv_buf,
|
||||
PAGE_SIZE/2 /* pretty arbitrary */,
|
||||
arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
|
||||
req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
|
||||
done:
|
||||
if (err)
|
||||
dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err);
|
||||
|
@ -21,7 +21,7 @@ static struct rpc_cred null_cred;
|
||||
static struct rpc_auth *
|
||||
nul_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
|
||||
{
|
||||
atomic_inc(&null_auth.au_count);
|
||||
refcount_inc(&null_auth.au_count);
|
||||
return &null_auth;
|
||||
}
|
||||
|
||||
@ -119,7 +119,7 @@ struct rpc_auth null_auth = {
|
||||
.au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
|
||||
.au_ops = &authnull_ops,
|
||||
.au_flavor = RPC_AUTH_NULL,
|
||||
.au_count = ATOMIC_INIT(0),
|
||||
.au_count = REFCOUNT_INIT(1),
|
||||
};
|
||||
|
||||
static
|
||||
@ -138,6 +138,6 @@ struct rpc_cred null_cred = {
|
||||
.cr_lru = LIST_HEAD_INIT(null_cred.cr_lru),
|
||||
.cr_auth = &null_auth,
|
||||
.cr_ops = &null_credops,
|
||||
.cr_count = ATOMIC_INIT(1),
|
||||
.cr_count = REFCOUNT_INIT(2),
|
||||
.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE,
|
||||
};
|
||||
|
@ -34,7 +34,7 @@ unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
|
||||
{
|
||||
dprintk("RPC: creating UNIX authenticator for client %p\n",
|
||||
clnt);
|
||||
atomic_inc(&unix_auth.au_count);
|
||||
refcount_inc(&unix_auth.au_count);
|
||||
return &unix_auth;
|
||||
}
|
||||
|
||||
@ -239,7 +239,7 @@ struct rpc_auth unix_auth = {
|
||||
.au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
|
||||
.au_ops = &authunix_ops,
|
||||
.au_flavor = RPC_AUTH_UNIX,
|
||||
.au_count = ATOMIC_INIT(0),
|
||||
.au_count = REFCOUNT_INIT(1),
|
||||
};
|
||||
|
||||
static
|
||||
|
@ -91,7 +91,6 @@ struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
|
||||
return NULL;
|
||||
|
||||
req->rq_xprt = xprt;
|
||||
INIT_LIST_HEAD(&req->rq_list);
|
||||
INIT_LIST_HEAD(&req->rq_bc_list);
|
||||
|
||||
/* Preallocate one XDR receive buffer */
|
||||
|
@ -61,6 +61,7 @@ static void call_start(struct rpc_task *task);
|
||||
static void call_reserve(struct rpc_task *task);
|
||||
static void call_reserveresult(struct rpc_task *task);
|
||||
static void call_allocate(struct rpc_task *task);
|
||||
static void call_encode(struct rpc_task *task);
|
||||
static void call_decode(struct rpc_task *task);
|
||||
static void call_bind(struct rpc_task *task);
|
||||
static void call_bind_status(struct rpc_task *task);
|
||||
@ -1137,10 +1138,10 @@ EXPORT_SYMBOL_GPL(rpc_call_async);
|
||||
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
|
||||
{
|
||||
struct rpc_task *task;
|
||||
struct xdr_buf *xbufp = &req->rq_snd_buf;
|
||||
struct rpc_task_setup task_setup_data = {
|
||||
.callback_ops = &rpc_default_ops,
|
||||
.flags = RPC_TASK_SOFTCONN,
|
||||
.flags = RPC_TASK_SOFTCONN |
|
||||
RPC_TASK_NO_RETRANS_TIMEOUT,
|
||||
};
|
||||
|
||||
dprintk("RPC: rpc_run_bc_task req= %p\n", req);
|
||||
@ -1148,14 +1149,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
|
||||
* Create an rpc_task to send the data
|
||||
*/
|
||||
task = rpc_new_task(&task_setup_data);
|
||||
task->tk_rqstp = req;
|
||||
|
||||
/*
|
||||
* Set up the xdr_buf length.
|
||||
* This also indicates that the buffer is XDR encoded already.
|
||||
*/
|
||||
xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
|
||||
xbufp->tail[0].iov_len;
|
||||
xprt_init_bc_request(req, task);
|
||||
|
||||
task->tk_action = call_bc_transmit;
|
||||
atomic_inc(&task->tk_count);
|
||||
@ -1558,7 +1552,6 @@ call_reserveresult(struct rpc_task *task)
|
||||
task->tk_status = 0;
|
||||
if (status >= 0) {
|
||||
if (task->tk_rqstp) {
|
||||
xprt_request_init(task);
|
||||
task->tk_action = call_refresh;
|
||||
return;
|
||||
}
|
||||
@ -1680,7 +1673,7 @@ call_allocate(struct rpc_task *task)
|
||||
dprint_status(task);
|
||||
|
||||
task->tk_status = 0;
|
||||
task->tk_action = call_bind;
|
||||
task->tk_action = call_encode;
|
||||
|
||||
if (req->rq_buffer)
|
||||
return;
|
||||
@ -1721,22 +1714,15 @@ call_allocate(struct rpc_task *task)
|
||||
rpc_exit(task, -ERESTARTSYS);
|
||||
}
|
||||
|
||||
static inline int
|
||||
static int
|
||||
rpc_task_need_encode(struct rpc_task *task)
|
||||
{
|
||||
return task->tk_rqstp->rq_snd_buf.len == 0;
|
||||
return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
|
||||
(!(task->tk_flags & RPC_TASK_SENT) ||
|
||||
!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
|
||||
xprt_request_need_retransmit(task));
|
||||
}
|
||||
|
||||
static inline void
|
||||
rpc_task_force_reencode(struct rpc_task *task)
|
||||
{
|
||||
task->tk_rqstp->rq_snd_buf.len = 0;
|
||||
task->tk_rqstp->rq_bytes_sent = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* 3. Encode arguments of an RPC call
|
||||
*/
|
||||
static void
|
||||
rpc_xdr_encode(struct rpc_task *task)
|
||||
{
|
||||
@ -1752,6 +1738,7 @@ rpc_xdr_encode(struct rpc_task *task)
|
||||
xdr_buf_init(&req->rq_rcv_buf,
|
||||
req->rq_rbuffer,
|
||||
req->rq_rcvsize);
|
||||
req->rq_bytes_sent = 0;
|
||||
|
||||
p = rpc_encode_header(task);
|
||||
if (p == NULL) {
|
||||
@ -1766,6 +1753,36 @@ rpc_xdr_encode(struct rpc_task *task)
|
||||
|
||||
task->tk_status = rpcauth_wrap_req(task, encode, req, p,
|
||||
task->tk_msg.rpc_argp);
|
||||
if (task->tk_status == 0)
|
||||
xprt_request_prepare(req);
|
||||
}
|
||||
|
||||
/*
|
||||
* 3. Encode arguments of an RPC call
|
||||
*/
|
||||
static void
|
||||
call_encode(struct rpc_task *task)
|
||||
{
|
||||
if (!rpc_task_need_encode(task))
|
||||
goto out;
|
||||
/* Encode here so that rpcsec_gss can use correct sequence number. */
|
||||
rpc_xdr_encode(task);
|
||||
/* Did the encode result in an error condition? */
|
||||
if (task->tk_status != 0) {
|
||||
/* Was the error nonfatal? */
|
||||
if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
|
||||
rpc_delay(task, HZ >> 4);
|
||||
else
|
||||
rpc_exit(task, task->tk_status);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Add task to reply queue before transmission to avoid races */
|
||||
if (rpc_reply_expected(task))
|
||||
xprt_request_enqueue_receive(task);
|
||||
xprt_request_enqueue_transmit(task);
|
||||
out:
|
||||
task->tk_action = call_bind;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1947,43 +1964,16 @@ call_connect_status(struct rpc_task *task)
|
||||
static void
|
||||
call_transmit(struct rpc_task *task)
|
||||
{
|
||||
int is_retrans = RPC_WAS_SENT(task);
|
||||
|
||||
dprint_status(task);
|
||||
|
||||
task->tk_action = call_status;
|
||||
if (task->tk_status < 0)
|
||||
return;
|
||||
if (!xprt_prepare_transmit(task))
|
||||
return;
|
||||
task->tk_action = call_transmit_status;
|
||||
/* Encode here so that rpcsec_gss can use correct sequence number. */
|
||||
if (rpc_task_need_encode(task)) {
|
||||
rpc_xdr_encode(task);
|
||||
/* Did the encode result in an error condition? */
|
||||
if (task->tk_status != 0) {
|
||||
/* Was the error nonfatal? */
|
||||
if (task->tk_status == -EAGAIN)
|
||||
rpc_delay(task, HZ >> 4);
|
||||
else
|
||||
rpc_exit(task, task->tk_status);
|
||||
task->tk_status = 0;
|
||||
if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
|
||||
if (!xprt_prepare_transmit(task))
|
||||
return;
|
||||
}
|
||||
xprt_transmit(task);
|
||||
}
|
||||
xprt_transmit(task);
|
||||
if (task->tk_status < 0)
|
||||
return;
|
||||
if (is_retrans)
|
||||
task->tk_client->cl_stats->rpcretrans++;
|
||||
/*
|
||||
* On success, ensure that we call xprt_end_transmit() before sleeping
|
||||
* in order to allow access to the socket to other RPC requests.
|
||||
*/
|
||||
call_transmit_status(task);
|
||||
if (rpc_reply_expected(task))
|
||||
return;
|
||||
task->tk_action = rpc_exit_task;
|
||||
rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
|
||||
task->tk_action = call_transmit_status;
|
||||
xprt_end_transmit(task);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1999,19 +1989,17 @@ call_transmit_status(struct rpc_task *task)
|
||||
* test first.
|
||||
*/
|
||||
if (task->tk_status == 0) {
|
||||
xprt_end_transmit(task);
|
||||
rpc_task_force_reencode(task);
|
||||
xprt_request_wait_receive(task);
|
||||
return;
}

switch (task->tk_status) {
case -EAGAIN:
case -ENOBUFS:
break;
default:
dprint_status(task);
xprt_end_transmit(task);
rpc_task_force_reencode(task);
break;
case -EBADMSG:
task->tk_status = 0;
task->tk_action = call_encode;
break;
/*
* Special cases: if we've been waiting on the
@@ -2019,6 +2007,14 @@ call_transmit_status(struct rpc_task *task)
* socket just returned a connection error,
* then hold onto the transport lock.
*/
case -ENOBUFS:
rpc_delay(task, HZ>>2);
/* fall through */
case -EBADSLT:
case -EAGAIN:
task->tk_action = call_transmit;
task->tk_status = 0;
break;
case -ECONNREFUSED:
case -EHOSTDOWN:
case -ENETDOWN:
@@ -2026,7 +2022,6 @@ call_transmit_status(struct rpc_task *task)
case -ENETUNREACH:
case -EPERM:
if (RPC_IS_SOFTCONN(task)) {
xprt_end_transmit(task);
if (!task->tk_msg.rpc_proc->p_proc)
trace_xprt_ping(task->tk_xprt,
task->tk_status);
@@ -2039,7 +2034,7 @@ call_transmit_status(struct rpc_task *task)
case -EADDRINUSE:
case -ENOTCONN:
case -EPIPE:
rpc_task_force_reencode(task);
break;
}
}

@@ -2053,6 +2048,11 @@ call_bc_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;

if (rpc_task_need_encode(task))
xprt_request_enqueue_transmit(task);
if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
goto out_wakeup;

if (!xprt_prepare_transmit(task))
goto out_retry;

@@ -2061,14 +2061,9 @@ call_bc_transmit(struct rpc_task *task)
"error: %d\n", task->tk_status);
goto out_done;
}
if (req->rq_connect_cookie != req->rq_xprt->connect_cookie)
req->rq_bytes_sent = 0;

xprt_transmit(task);

if (task->tk_status == -EAGAIN)
goto out_nospace;

xprt_end_transmit(task);
dprint_status(task);
switch (task->tk_status) {
@@ -2084,6 +2079,8 @@ call_bc_transmit(struct rpc_task *task)
case -ENOTCONN:
case -EPIPE:
break;
case -EAGAIN:
goto out_retry;
case -ETIMEDOUT:
/*
* Problem reaching the server. Disconnect and let the
@@ -2107,12 +2104,11 @@ call_bc_transmit(struct rpc_task *task)
"error: %d\n", task->tk_status);
break;
}
out_wakeup:
rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
out_done:
task->tk_action = rpc_exit_task;
return;
out_nospace:
req->rq_connect_cookie = req->rq_xprt->connect_cookie;
out_retry:
task->tk_status = 0;
}
@@ -2125,15 +2121,11 @@ static void
call_status(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_rqst *req = task->tk_rqstp;
int status;

if (!task->tk_msg.rpc_proc->p_proc)
trace_xprt_ping(task->tk_xprt, task->tk_status);

if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_reply_bytes_recvd;

dprint_status(task);

status = task->tk_status;
@@ -2173,13 +2165,8 @@ call_status(struct rpc_task *task)
/* fall through */
case -EPIPE:
case -ENOTCONN:
task->tk_action = call_bind;
break;
case -ENOBUFS:
rpc_delay(task, HZ>>2);
/* fall through */
case -EAGAIN:
task->tk_action = call_transmit;
task->tk_action = call_encode;
break;
case -EIO:
/* shutdown or soft timeout */
@@ -2244,7 +2231,7 @@ call_timeout(struct rpc_task *task)
rpcauth_invalcred(task);

retry:
task->tk_action = call_bind;
task->tk_action = call_encode;
task->tk_status = 0;
}

@@ -2261,6 +2248,11 @@ call_decode(struct rpc_task *task)

dprint_status(task);

if (!decode) {
task->tk_action = rpc_exit_task;
return;
}

if (task->tk_flags & RPC_CALL_MAJORSEEN) {
if (clnt->cl_chatty) {
printk(KERN_NOTICE "%s: server %s OK\n",
@@ -2283,7 +2275,7 @@ call_decode(struct rpc_task *task)

if (req->rq_rcv_buf.len < 12) {
if (!RPC_IS_SOFT(task)) {
task->tk_action = call_bind;
task->tk_action = call_encode;
goto out_retry;
}
dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
@@ -2298,13 +2290,11 @@ call_decode(struct rpc_task *task)
goto out_retry;
return;
}

task->tk_action = rpc_exit_task;

if (decode) {
task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
task->tk_msg.rpc_resp);
}
task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
task->tk_msg.rpc_resp);

dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
task->tk_status);
return;
@@ -2416,7 +2406,7 @@ rpc_verify_header(struct rpc_task *task)
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retry garbled creds\n",
task->tk_pid, __func__);
task->tk_action = call_bind;
task->tk_action = call_encode;
goto out_retry;
case RPC_AUTH_TOOWEAK:
printk(KERN_NOTICE "RPC: server %s requires stronger "
@@ -2485,7 +2475,7 @@ out_garbage:
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retrying\n",
task->tk_pid, __func__);
task->tk_action = call_bind;
task->tk_action = call_encode;
out_retry:
return ERR_PTR(-EAGAIN);
}
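
Editorial aside: every hunk above rewires task->tk_action, the function pointer the RPC scheduler keeps invoking until the task exits, so that retries now restart at call_encode rather than the old call_bind/call_transmit steps. The following minimal, self-contained userspace sketch only illustrates that function-pointer-driven state-machine pattern; the toy_* names, the retry counter, and the handlers are invented for illustration and are not the kernel's code.

#include <stdio.h>

/* Toy model of a function-pointer-driven task state machine, loosely
 * mirroring how rpc_task.tk_action is used: each handler does one step
 * and then points the action at the next step (or clears it to stop).
 */
struct toy_task {
	void (*action)(struct toy_task *);
	int status;
	int retries;
};

static void toy_encode(struct toy_task *t);
static void toy_transmit(struct toy_task *t);
static void toy_decode(struct toy_task *t);

static void toy_encode(struct toy_task *t)
{
	printf("encode\n");
	t->action = toy_transmit;
}

static void toy_transmit(struct toy_task *t)
{
	printf("transmit\n");
	if (t->retries-- > 0) {
		/* transient error: go back and re-encode, analogous to
		 * the -EAGAIN/-ENOBUFS cases retargeting call_encode */
		t->status = 0;
		t->action = toy_encode;
		return;
	}
	t->action = toy_decode;
}

static void toy_decode(struct toy_task *t)
{
	printf("decode, status=%d\n", t->status);
	t->action = NULL;	/* rough analogue of rpc_exit_task */
}

int main(void)
{
	struct toy_task t = { .action = toy_encode, .retries = 1 };

	while (t.action)	/* the scheduler loop */
		t.action(&t);
	return 0;
}
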
@@ -99,37 +99,64 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
struct list_head *q = &queue->tasks[queue->priority];
struct rpc_task *task;

if (!list_empty(q)) {
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
if (task->tk_owner == queue->owner)
list_move_tail(&task->u.tk_wait.list, q);
}
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
if (queue->priority != priority) {
/* Fairness: rotate the list when changing priority */
rpc_rotate_queue_owner(queue);
queue->priority = priority;
queue->nr = 1U << priority;
}
}

static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
queue->owner = pid;
queue->nr = RPC_BATCH_COUNT;
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
rpc_set_waitqueue_owner(queue, 0);
}

/*
* Add a request to a queue list
*/
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
struct rpc_task *t;

list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.links,
&t->u.tk_wait.links);
/* Cache the queue head in task->u.tk_wait.list */
task->u.tk_wait.list.next = q;
task->u.tk_wait.list.prev = NULL;
return;
}
}
INIT_LIST_HEAD(&task->u.tk_wait.links);
list_add_tail(&task->u.tk_wait.list, q);
}

/*
* Remove request from a queue list
*/
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
struct list_head *q;
struct rpc_task *t;

if (task->u.tk_wait.list.prev == NULL) {
list_del(&task->u.tk_wait.links);
return;
}
if (!list_empty(&task->u.tk_wait.links)) {
t = list_first_entry(&task->u.tk_wait.links,
struct rpc_task,
u.tk_wait.links);
/* Assume __rpc_list_enqueue_task() cached the queue head */
q = t->u.tk_wait.list.next;
list_add_tail(&t->u.tk_wait.list, q);
list_del(&task->u.tk_wait.links);
}
list_del(&task->u.tk_wait.list);
}
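
Editorial aside: the pair of helpers above keeps only one task per owner on the wait queue proper and parks the rest on that task's u.tk_wait.links sub-list. Because a parked task's own u.tk_wait.list linkage is then unused, enqueue borrows its ->next field to remember the queue head and sets ->prev to NULL as the "parked" marker, which is exactly what dequeue tests first; promotion of a follower therefore never has to be told which queue to rejoin. The sketch below reproduces only that trick with a hand-rolled node type; the toy_* names are invented and the per-owner grouping is omitted.

#include <stddef.h>
#include <stdio.h>

/* A node that is NOT linked into the queue reuses its idle linkage
 * fields: prev == NULL marks it as parked, next caches the queue head.
 */
struct toy_node {
	struct toy_node *next;
	struct toy_node *prev;
};

static void toy_park(struct toy_node *node, struct toy_node *queue)
{
	node->next = queue;	/* cache the queue head */
	node->prev = NULL;	/* marker: not on the queue itself */
}

static int toy_is_parked(const struct toy_node *node)
{
	return node->prev == NULL;
}

static struct toy_node *toy_cached_queue(const struct toy_node *node)
{
	return node->next;
}

int main(void)
{
	struct toy_node queue = { &queue, &queue };
	struct toy_node task = { 0 };

	toy_park(&task, &queue);
	printf("parked: %d, cached head is the queue: %d\n",
	       toy_is_parked(&task), toy_cached_queue(&task) == &queue);
	return 0;
}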

/*
@@ -139,22 +166,9 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
struct rpc_task *task,
unsigned char queue_priority)
{
struct list_head *q;
struct rpc_task *t;

INIT_LIST_HEAD(&task->u.tk_wait.links);
if (unlikely(queue_priority > queue->maxpriority))
queue_priority = queue->maxpriority;
if (queue_priority > queue->priority)
rpc_set_waitqueue_priority(queue, queue_priority);
q = &queue->tasks[queue_priority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
return;
}
}
list_add_tail(&task->u.tk_wait.list, q);
__rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
}

/*
@@ -194,13 +208,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
*/
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
struct rpc_task *t;

if (!list_empty(&task->u.tk_wait.links)) {
t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
}
__rpc_list_dequeue_task(task);
}

/*
@@ -212,7 +220,8 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
__rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
list_del(&task->u.tk_wait.list);
else
list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
@@ -440,14 +449,28 @@ static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
/*
* Wake up a queued task while the queue lock is being held
*/
static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
struct rpc_wait_queue *queue, struct rpc_task *task)
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
struct rpc_wait_queue *queue, struct rpc_task *task,
bool (*action)(struct rpc_task *, void *), void *data)
{
if (RPC_IS_QUEUED(task)) {
smp_rmb();
if (task->tk_waitqueue == queue)
__rpc_do_wake_up_task_on_wq(wq, queue, task);
if (task->tk_waitqueue == queue) {
if (action == NULL || action(task, data)) {
__rpc_do_wake_up_task_on_wq(wq, queue, task);
return task;
}
}
}
return NULL;
}

static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
struct rpc_wait_queue *queue, struct rpc_task *task)
{
rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}

/*
@@ -465,6 +488,8 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_wait_queue *queue,
struct rpc_task *task)
{
if (!RPC_IS_QUEUED(task))
return;
spin_lock_bh(&queue->lock);
rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
spin_unlock_bh(&queue->lock);
@@ -475,12 +500,48 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (!RPC_IS_QUEUED(task))
return;
spin_lock_bh(&queue->lock);
rpc_wake_up_task_queue_locked(queue, task);
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
task->tk_status = *(int *)status;
return true;
}

static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
struct rpc_task *task, int status)
{
rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
task, rpc_task_action_set_status, &status);
}

/**
* rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
* @queue: pointer to rpc_wait_queue
* @task: pointer to rpc_task
* @status: integer error value
*
* If @task is queued on @queue, then it is woken up, and @task->tk_status is
* set to the value of @status.
*/
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
struct rpc_task *task, int status)
{
if (!RPC_IS_QUEUED(task))
return;
spin_lock_bh(&queue->lock);
rpc_wake_up_task_queue_set_status_locked(queue, task, status);
spin_unlock_bh(&queue->lock);
}
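
Editorial aside: a short, hypothetical usage sketch of the new helper above. The caller name and the error value are invented for illustration; only rpc_wake_up_queued_task_set_status() itself comes from this patch. Because the status assignment runs as the action callback under the queue lock, setting the error and waking the task is a single atomic step.

/* Hypothetical caller (kernel context): fail an RPC task that is still
 * sleeping on the transport's "pending" wait queue, e.g. from a
 * timeout path.
 */
static void example_fail_pending_task(struct rpc_xprt *xprt,
				      struct rpc_task *task)
{
	rpc_wake_up_queued_task_set_status(&xprt->pending, task, -ETIMEDOUT);
}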

/*
* Wake up the next task on a priority queue.
*/
@@ -493,17 +554,9 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
if (queue->owner == task->tk_owner) {
if (--queue->nr)
goto out;
list_move_tail(&task->u.tk_wait.list, q);
}
/*
* Check if we need to switch queues.
*/
goto new_owner;
if (!list_empty(q) && --queue->nr) {
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
goto out;
}

/*
@@ -515,7 +568,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
else
q = q - 1;
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
goto new_queue;
}
} while (q != &queue->tasks[queue->priority]);
@@ -525,8 +578,6 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q

new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
return task;
}
@@ -553,12 +604,9 @@ struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
queue, rpc_qname(queue));
spin_lock_bh(&queue->lock);
task = __rpc_find_next_queued(queue);
if (task != NULL) {
if (func(task, data))
rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
else
task = NULL;
}
if (task != NULL)
task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
task, func, data);
spin_unlock_bh(&queue->lock);

return task;

@@ -26,7 +26,8 @@
* Possibly called several times to iterate over an sk_buff and copy
* data out of it.
*/
size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
static size_t
xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
if (len > desc->count)
len = desc->count;
@@ -36,7 +37,6 @@ size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
desc->offset += len;
return len;
}
EXPORT_SYMBOL_GPL(xdr_skb_read_bits);

/**
* xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
@@ -69,7 +69,8 @@ static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to,
* @copy_actor: virtual method for copying data
*
*/
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
static ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
@@ -104,7 +105,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct

/* ACL likes to be lazy in allocating pages - ACLs
* are small by default but can get huge. */
if (unlikely(*ppage == NULL)) {
if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
*ppage = alloc_page(GFP_ATOMIC);
if (unlikely(*ppage == NULL)) {
if (copied == 0)
@@ -140,7 +141,6 @@ copy_tail:
out:
return copied;
}
EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);

/**
* csum_partial_copy_to_xdr - checksum and copy data

|
||||
mutex_init(&xprt->xpt_mutex);
|
||||
spin_lock_init(&xprt->xpt_lock);
|
||||
set_bit(XPT_BUSY, &xprt->xpt_flags);
|
||||
rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
|
||||
xprt->xpt_net = get_net(net);
|
||||
strcpy(xprt->xpt_remotebuf, "uninitialized");
|
||||
}
|
||||
@ -895,7 +894,6 @@ int svc_send(struct svc_rqst *rqstp)
|
||||
else
|
||||
len = xprt->xpt_ops->xpo_sendto(rqstp);
|
||||
mutex_unlock(&xprt->xpt_mutex);
|
||||
rpc_wake_up(&xprt->xpt_bc_pending);
|
||||
trace_svc_send(rqstp, len);
|
||||
svc_xprt_release(rqstp);
|
||||
|
||||
|
@ -1004,7 +1004,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
|
||||
|
||||
if (!bc_xprt)
|
||||
return -EAGAIN;
|
||||
spin_lock(&bc_xprt->recv_lock);
|
||||
spin_lock(&bc_xprt->queue_lock);
|
||||
req = xprt_lookup_rqst(bc_xprt, xid);
|
||||
if (!req)
|
||||
goto unlock_notfound;
|
||||
@ -1022,7 +1022,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
|
||||
memcpy(dst->iov_base, src->iov_base, src->iov_len);
|
||||
xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
|
||||
rqstp->rq_arg.len = 0;
|
||||
spin_unlock(&bc_xprt->recv_lock);
|
||||
spin_unlock(&bc_xprt->queue_lock);
|
||||
return 0;
|
||||
unlock_notfound:
|
||||
printk(KERN_NOTICE
|
||||
@ -1031,7 +1031,7 @@ unlock_notfound:
|
||||
__func__, ntohl(calldir),
|
||||
bc_xprt, ntohl(xid));
|
||||
unlock_eagain:
|
||||
spin_unlock(&bc_xprt->recv_lock);
|
||||
spin_unlock(&bc_xprt->queue_lock);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
|
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>

/*
* XDR functions for basic NFS types
@@ -128,6 +129,39 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

size_t
xdr_buf_pagecount(struct xdr_buf *buf)
{
if (!buf->page_len)
return 0;
return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
size_t i, n = xdr_buf_pagecount(buf);

if (n != 0 && buf->bvec == NULL) {
buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
if (!buf->bvec)
return -ENOMEM;
for (i = 0; i < n; i++) {
buf->bvec[i].bv_page = buf->pages[i];
buf->bvec[i].bv_len = PAGE_SIZE;
buf->bvec[i].bv_offset = 0;
}
}
return 0;
}

void
xdr_free_bvec(struct xdr_buf *buf)
{
kfree(buf->bvec);
buf->bvec = NULL;
}
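
Editorial aside: a worked example of the xdr_buf_pagecount() rounding used above, written as a tiny standalone program. The TOY_* names are invented and PAGE_SIZE is assumed to be 4096; the arithmetic itself is taken verbatim from the function. With page_base = 100 and page_len = 8000 the payload occupies bytes 100..8099 of the page array, i.e. two pages, and that is the count xdr_alloc_bvec() would then use to allocate one bio_vec per page.

#include <stdio.h>

#define TOY_PAGE_SIZE	4096u
#define TOY_PAGE_SHIFT	12

/* Same round-up as xdr_buf_pagecount(), with PAGE_SIZE assumed 4096 */
static unsigned int toy_pagecount(unsigned int page_base, unsigned int page_len)
{
	if (!page_len)
		return 0;
	return (page_base + page_len + TOY_PAGE_SIZE - 1) >> TOY_PAGE_SHIFT;
}

int main(void)
{
	printf("%u\n", toy_pagecount(100, 8000));	/* prints 2 */
	return 0;
}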

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)

[File diff suppressed because it is too large]
@ -51,12 +51,11 @@ static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
|
||||
rqst = &req->rl_slot;
|
||||
|
||||
rqst->rq_xprt = xprt;
|
||||
INIT_LIST_HEAD(&rqst->rq_list);
|
||||
INIT_LIST_HEAD(&rqst->rq_bc_list);
|
||||
__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
|
||||
spin_lock_bh(&xprt->bc_pa_lock);
|
||||
spin_lock(&xprt->bc_pa_lock);
|
||||
list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
|
||||
spin_unlock_bh(&xprt->bc_pa_lock);
|
||||
spin_unlock(&xprt->bc_pa_lock);
|
||||
|
||||
size = r_xprt->rx_data.inline_rsize;
|
||||
rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
|
||||
@ -201,6 +200,9 @@ int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
|
||||
if (!xprt_connected(rqst->rq_xprt))
|
||||
goto drop_connection;
|
||||
|
||||
if (!xprt_request_get_cong(rqst->rq_xprt, rqst))
|
||||
return -EBADSLT;
|
||||
|
||||
rc = rpcrdma_bc_marshal_reply(rqst);
|
||||
if (rc < 0)
|
||||
goto failed_marshal;
|
||||
@ -228,16 +230,16 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
|
||||
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
|
||||
struct rpc_rqst *rqst, *tmp;
|
||||
|
||||
spin_lock_bh(&xprt->bc_pa_lock);
|
||||
spin_lock(&xprt->bc_pa_lock);
|
||||
list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
|
||||
list_del(&rqst->rq_bc_pa_list);
|
||||
spin_unlock_bh(&xprt->bc_pa_lock);
|
||||
spin_unlock(&xprt->bc_pa_lock);
|
||||
|
||||
rpcrdma_bc_free_rqst(r_xprt, rqst);
|
||||
|
||||
spin_lock_bh(&xprt->bc_pa_lock);
|
||||
spin_lock(&xprt->bc_pa_lock);
|
||||
}
|
||||
spin_unlock_bh(&xprt->bc_pa_lock);
|
||||
spin_unlock(&xprt->bc_pa_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -255,9 +257,9 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
|
||||
rpcrdma_recv_buffer_put(req->rl_reply);
|
||||
req->rl_reply = NULL;
|
||||
|
||||
spin_lock_bh(&xprt->bc_pa_lock);
|
||||
spin_lock(&xprt->bc_pa_lock);
|
||||
list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
|
||||
spin_unlock_bh(&xprt->bc_pa_lock);
|
||||
spin_unlock(&xprt->bc_pa_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -49,6 +49,65 @@ fmr_is_supported(struct rpcrdma_ia *ia)
|
||||
return true;
|
||||
}
|
||||
|
||||
static void
|
||||
__fmr_unmap(struct rpcrdma_mr *mr)
|
||||
{
|
||||
LIST_HEAD(l);
|
||||
int rc;
|
||||
|
||||
list_add(&mr->fmr.fm_mr->list, &l);
|
||||
rc = ib_unmap_fmr(&l);
|
||||
list_del(&mr->fmr.fm_mr->list);
|
||||
if (rc)
|
||||
pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
|
||||
mr, rc);
|
||||
}
|
||||
|
||||
/* Release an MR.
|
||||
*/
|
||||
static void
|
||||
fmr_op_release_mr(struct rpcrdma_mr *mr)
|
||||
{
|
||||
int rc;
|
||||
|
||||
kfree(mr->fmr.fm_physaddrs);
|
||||
kfree(mr->mr_sg);
|
||||
|
||||
/* In case this one was left mapped, try to unmap it
|
||||
* to prevent dealloc_fmr from failing with EBUSY
|
||||
*/
|
||||
__fmr_unmap(mr);
|
||||
|
||||
rc = ib_dealloc_fmr(mr->fmr.fm_mr);
|
||||
if (rc)
|
||||
pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
|
||||
mr, rc);
|
||||
|
||||
kfree(mr);
|
||||
}
|
||||
|
||||
/* MRs are dynamically allocated, so simply clean up and release the MR.
|
||||
* A replacement MR will subsequently be allocated on demand.
|
||||
*/
|
||||
static void
|
||||
fmr_mr_recycle_worker(struct work_struct *work)
|
||||
{
|
||||
struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
|
||||
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
|
||||
|
||||
trace_xprtrdma_mr_recycle(mr);
|
||||
|
||||
trace_xprtrdma_mr_unmap(mr);
|
||||
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
|
||||
mr->mr_sg, mr->mr_nents, mr->mr_dir);
|
||||
|
||||
spin_lock(&r_xprt->rx_buf.rb_mrlock);
|
||||
list_del(&mr->mr_all);
|
||||
r_xprt->rx_stats.mrs_recycled++;
|
||||
spin_unlock(&r_xprt->rx_buf.rb_mrlock);
|
||||
fmr_op_release_mr(mr);
|
||||
}
|
||||
|
||||
static int
|
||||
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
|
||||
{
|
||||
@ -76,6 +135,7 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
|
||||
goto out_fmr_err;
|
||||
|
||||
INIT_LIST_HEAD(&mr->mr_list);
|
||||
INIT_WORK(&mr->mr_recycle, fmr_mr_recycle_worker);
|
||||
return 0;
|
||||
|
||||
out_fmr_err:
|
||||
@ -88,77 +148,6 @@ out_free:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int
|
||||
__fmr_unmap(struct rpcrdma_mr *mr)
|
||||
{
|
||||
LIST_HEAD(l);
|
||||
int rc;
|
||||
|
||||
list_add(&mr->fmr.fm_mr->list, &l);
|
||||
rc = ib_unmap_fmr(&l);
|
||||
list_del(&mr->fmr.fm_mr->list);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void
|
||||
fmr_op_release_mr(struct rpcrdma_mr *mr)
|
||||
{
|
||||
LIST_HEAD(unmap_list);
|
||||
int rc;
|
||||
|
||||
kfree(mr->fmr.fm_physaddrs);
|
||||
kfree(mr->mr_sg);
|
||||
|
||||
/* In case this one was left mapped, try to unmap it
|
||||
* to prevent dealloc_fmr from failing with EBUSY
|
||||
*/
|
||||
rc = __fmr_unmap(mr);
|
||||
if (rc)
|
||||
pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
|
||||
mr, rc);
|
||||
|
||||
rc = ib_dealloc_fmr(mr->fmr.fm_mr);
|
||||
if (rc)
|
||||
pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
|
||||
mr, rc);
|
||||
|
||||
kfree(mr);
|
||||
}
|
||||
|
||||
/* Reset of a single FMR.
|
||||
*/
|
||||
static void
|
||||
fmr_op_recover_mr(struct rpcrdma_mr *mr)
|
||||
{
|
||||
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
|
||||
int rc;
|
||||
|
||||
/* ORDER: invalidate first */
|
||||
rc = __fmr_unmap(mr);
|
||||
if (rc)
|
||||
goto out_release;
|
||||
|
||||
/* ORDER: then DMA unmap */
|
||||
rpcrdma_mr_unmap_and_put(mr);
|
||||
|
||||
r_xprt->rx_stats.mrs_recovered++;
|
||||
return;
|
||||
|
||||
out_release:
|
||||
pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
|
||||
r_xprt->rx_stats.mrs_orphaned++;
|
||||
|
||||
trace_xprtrdma_dma_unmap(mr);
|
||||
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
|
||||
mr->mr_sg, mr->mr_nents, mr->mr_dir);
|
||||
|
||||
spin_lock(&r_xprt->rx_buf.rb_mrlock);
|
||||
list_del(&mr->mr_all);
|
||||
spin_unlock(&r_xprt->rx_buf.rb_mrlock);
|
||||
|
||||
fmr_op_release_mr(mr);
|
||||
}
|
||||
|
||||
/* On success, sets:
|
||||
* ep->rep_attr.cap.max_send_wr
|
||||
* ep->rep_attr.cap.max_recv_wr
|
||||
@ -187,6 +176,7 @@ fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
|
||||
|
||||
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
|
||||
RPCRDMA_MAX_FMR_SGES);
|
||||
ia->ri_max_segs += 2; /* segments for head and tail buffers */
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -244,7 +234,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
|
||||
mr->mr_sg, i, mr->mr_dir);
|
||||
if (!mr->mr_nents)
|
||||
goto out_dmamap_err;
|
||||
trace_xprtrdma_dma_map(mr);
|
||||
trace_xprtrdma_mr_map(mr);
|
||||
|
||||
for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
|
||||
dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
|
||||
@ -305,13 +295,13 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
|
||||
list_for_each_entry(mr, mrs, mr_list) {
|
||||
dprintk("RPC: %s: unmapping fmr %p\n",
|
||||
__func__, &mr->fmr);
|
||||
trace_xprtrdma_localinv(mr);
|
||||
trace_xprtrdma_mr_localinv(mr);
|
||||
list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
|
||||
}
|
||||
r_xprt->rx_stats.local_inv_needed++;
|
||||
rc = ib_unmap_fmr(&unmap_list);
|
||||
if (rc)
|
||||
goto out_reset;
|
||||
goto out_release;
|
||||
|
||||
/* ORDER: Now DMA unmap all of the req's MRs, and return
|
||||
* them to the free MW list.
|
||||
@ -324,13 +314,13 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
|
||||
|
||||
return;
|
||||
|
||||
out_reset:
|
||||
out_release:
|
||||
pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
|
||||
|
||||
while (!list_empty(mrs)) {
|
||||
mr = rpcrdma_mr_pop(mrs);
|
||||
list_del(&mr->fmr.fm_mr->list);
|
||||
fmr_op_recover_mr(mr);
|
||||
rpcrdma_mr_recycle(mr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -338,7 +328,6 @@ const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
|
||||
.ro_map = fmr_op_map,
|
||||
.ro_send = fmr_op_send,
|
||||
.ro_unmap_sync = fmr_op_unmap_sync,
|
||||
.ro_recover_mr = fmr_op_recover_mr,
|
||||
.ro_open = fmr_op_open,
|
||||
.ro_maxpages = fmr_op_maxpages,
|
||||
.ro_init_mr = fmr_op_init_mr,
|
||||
|
@ -97,6 +97,44 @@ out_not_supported:
|
||||
return false;
|
||||
}
|
||||
|
||||
static void
|
||||
frwr_op_release_mr(struct rpcrdma_mr *mr)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = ib_dereg_mr(mr->frwr.fr_mr);
|
||||
if (rc)
|
||||
pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
|
||||
mr, rc);
|
||||
kfree(mr->mr_sg);
|
||||
kfree(mr);
|
||||
}
|
||||
|
||||
/* MRs are dynamically allocated, so simply clean up and release the MR.
|
||||
* A replacement MR will subsequently be allocated on demand.
|
||||
*/
|
||||
static void
|
||||
frwr_mr_recycle_worker(struct work_struct *work)
|
||||
{
|
||||
struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
|
||||
enum rpcrdma_frwr_state state = mr->frwr.fr_state;
|
||||
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
|
||||
|
||||
trace_xprtrdma_mr_recycle(mr);
|
||||
|
||||
if (state != FRWR_FLUSHED_LI) {
|
||||
trace_xprtrdma_mr_unmap(mr);
|
||||
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
|
||||
mr->mr_sg, mr->mr_nents, mr->mr_dir);
|
||||
}
|
||||
|
||||
spin_lock(&r_xprt->rx_buf.rb_mrlock);
|
||||
list_del(&mr->mr_all);
|
||||
r_xprt->rx_stats.mrs_recycled++;
|
||||
spin_unlock(&r_xprt->rx_buf.rb_mrlock);
|
||||
frwr_op_release_mr(mr);
|
||||
}
|
||||
|
||||
static int
|
||||
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
|
||||
{
|
||||
@ -113,6 +151,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
|
||||
goto out_list_err;
|
||||
|
||||
INIT_LIST_HEAD(&mr->mr_list);
|
||||
INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
|
||||
sg_init_table(mr->mr_sg, depth);
|
||||
init_completion(&frwr->fr_linv_done);
|
||||
return 0;
|
||||
@ -131,79 +170,6 @@ out_list_err:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void
|
||||
frwr_op_release_mr(struct rpcrdma_mr *mr)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = ib_dereg_mr(mr->frwr.fr_mr);
|
||||
if (rc)
|
||||
pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
|
||||
mr, rc);
|
||||
kfree(mr->mr_sg);
|
||||
kfree(mr);
|
||||
}
|
||||
|
||||
static int
|
||||
__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
|
||||
{
|
||||
struct rpcrdma_frwr *frwr = &mr->frwr;
|
||||
int rc;
|
||||
|
||||
rc = ib_dereg_mr(frwr->fr_mr);
|
||||
if (rc) {
|
||||
pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
|
||||
rc, mr);
|
||||
return rc;
|
||||
}
|
||||
|
||||
frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
|
||||
ia->ri_max_frwr_depth);
|
||||
if (IS_ERR(frwr->fr_mr)) {
|
||||
pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
|
||||
PTR_ERR(frwr->fr_mr), mr);
|
||||
return PTR_ERR(frwr->fr_mr);
|
||||
}
|
||||
|
||||
dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
|
||||
frwr->fr_state = FRWR_IS_INVALID;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
|
||||
*/
|
||||
static void
|
||||
frwr_op_recover_mr(struct rpcrdma_mr *mr)
|
||||
{
|
||||
enum rpcrdma_frwr_state state = mr->frwr.fr_state;
|
||||
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
|
||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||
int rc;
|
||||
|
||||
rc = __frwr_mr_reset(ia, mr);
|
||||
if (state != FRWR_FLUSHED_LI) {
|
||||
trace_xprtrdma_dma_unmap(mr);
|
||||
ib_dma_unmap_sg(ia->ri_device,
|
||||
mr->mr_sg, mr->mr_nents, mr->mr_dir);
|
||||
}
|
||||
if (rc)
|
||||
goto out_release;
|
||||
|
||||
rpcrdma_mr_put(mr);
|
||||
r_xprt->rx_stats.mrs_recovered++;
|
||||
return;
|
||||
|
||||
out_release:
|
||||
pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr);
|
||||
r_xprt->rx_stats.mrs_orphaned++;
|
||||
|
||||
spin_lock(&r_xprt->rx_buf.rb_mrlock);
|
||||
list_del(&mr->mr_all);
|
||||
spin_unlock(&r_xprt->rx_buf.rb_mrlock);
|
||||
|
||||
frwr_op_release_mr(mr);
|
||||
}
|
||||
|
||||
/* On success, sets:
|
||||
* ep->rep_attr.cap.max_send_wr
|
||||
* ep->rep_attr.cap.max_recv_wr
|
||||
@ -276,6 +242,7 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
|
||||
|
||||
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
|
||||
ia->ri_max_frwr_depth);
|
||||
ia->ri_max_segs += 2; /* segments for head and tail buffers */
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -384,7 +351,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
|
||||
mr = NULL;
|
||||
do {
|
||||
if (mr)
|
||||
rpcrdma_mr_defer_recovery(mr);
|
||||
rpcrdma_mr_recycle(mr);
|
||||
mr = rpcrdma_mr_get(r_xprt);
|
||||
if (!mr)
|
||||
return ERR_PTR(-EAGAIN);
|
||||
@ -417,7 +384,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
|
||||
mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
|
||||
if (!mr->mr_nents)
|
||||
goto out_dmamap_err;
|
||||
trace_xprtrdma_dma_map(mr);
|
||||
trace_xprtrdma_mr_map(mr);
|
||||
|
||||
ibmr = frwr->fr_mr;
|
||||
n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
|
||||
@ -451,7 +418,7 @@ out_dmamap_err:
|
||||
out_mapmr_err:
|
||||
pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
|
||||
frwr->fr_mr, n, mr->mr_nents);
|
||||
rpcrdma_mr_defer_recovery(mr);
|
||||
rpcrdma_mr_recycle(mr);
|
||||
return ERR_PTR(-EIO);
|
||||
}
|
||||
|
||||
@ -499,7 +466,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
|
||||
list_for_each_entry(mr, mrs, mr_list)
|
||||
if (mr->mr_handle == rep->rr_inv_rkey) {
|
||||
list_del_init(&mr->mr_list);
|
||||
trace_xprtrdma_remoteinv(mr);
|
||||
trace_xprtrdma_mr_remoteinv(mr);
|
||||
mr->frwr.fr_state = FRWR_IS_INVALID;
|
||||
rpcrdma_mr_unmap_and_put(mr);
|
||||
break; /* only one invalidated MR per RPC */
|
||||
@ -536,7 +503,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
|
||||
mr->frwr.fr_state = FRWR_IS_INVALID;
|
||||
|
||||
frwr = &mr->frwr;
|
||||
trace_xprtrdma_localinv(mr);
|
||||
trace_xprtrdma_mr_localinv(mr);
|
||||
|
||||
frwr->fr_cqe.done = frwr_wc_localinv;
|
||||
last = &frwr->fr_invwr;
|
||||
@ -570,7 +537,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
|
||||
if (bad_wr != first)
|
||||
wait_for_completion(&frwr->fr_linv_done);
|
||||
if (rc)
|
||||
goto reset_mrs;
|
||||
goto out_release;
|
||||
|
||||
/* ORDER: Now DMA unmap all of the MRs, and return
|
||||
* them to the free MR list.
|
||||
@ -582,22 +549,21 @@ unmap:
|
||||
}
|
||||
return;
|
||||
|
||||
reset_mrs:
|
||||
out_release:
|
||||
pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);
|
||||
|
||||
/* Find and reset the MRs in the LOCAL_INV WRs that did not
|
||||
/* Unmap and release the MRs in the LOCAL_INV WRs that did not
|
||||
* get posted.
|
||||
*/
|
||||
while (bad_wr) {
|
||||
frwr = container_of(bad_wr, struct rpcrdma_frwr,
|
||||
fr_invwr);
|
||||
mr = container_of(frwr, struct rpcrdma_mr, frwr);
|
||||
|
||||
__frwr_mr_reset(ia, mr);
|
||||
|
||||
bad_wr = bad_wr->next;
|
||||
|
||||
list_del(&mr->mr_list);
|
||||
frwr_op_release_mr(mr);
|
||||
}
|
||||
goto unmap;
|
||||
}
|
||||
|
||||
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
|
||||
@ -605,7 +571,6 @@ const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
|
||||
.ro_send = frwr_op_send,
|
||||
.ro_reminv = frwr_op_reminv,
|
||||
.ro_unmap_sync = frwr_op_unmap_sync,
|
||||
.ro_recover_mr = frwr_op_recover_mr,
|
||||
.ro_open = frwr_op_open,
|
||||
.ro_maxpages = frwr_op_maxpages,
|
||||
.ro_init_mr = frwr_op_init_mr,
|
||||
|
@ -71,7 +71,6 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
|
||||
size = RPCRDMA_HDRLEN_MIN;
|
||||
|
||||
/* Maximum Read list size */
|
||||
maxsegs += 2; /* segment for head and tail buffers */
|
||||
size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
|
||||
|
||||
/* Minimal Read chunk size */
|
||||
@ -97,7 +96,6 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
|
||||
size = RPCRDMA_HDRLEN_MIN;
|
||||
|
||||
/* Maximum Write list size */
|
||||
maxsegs += 2; /* segment for head and tail buffers */
|
||||
size = sizeof(__be32); /* segment count */
|
||||
size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
|
||||
size += sizeof(__be32); /* list discriminator */
|
||||
@ -805,7 +803,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
|
||||
struct rpcrdma_mr *mr;
|
||||
|
||||
mr = rpcrdma_mr_pop(&req->rl_registered);
|
||||
rpcrdma_mr_defer_recovery(mr);
|
||||
rpcrdma_mr_recycle(mr);
|
||||
}
|
||||
|
||||
/* This implementation supports the following combinations
|
||||
@ -866,7 +864,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
|
||||
out_err:
|
||||
switch (ret) {
|
||||
case -EAGAIN:
|
||||
xprt_wait_for_buffer_space(rqst->rq_task, NULL);
|
||||
xprt_wait_for_buffer_space(rqst->rq_xprt);
|
||||
break;
|
||||
case -ENOBUFS:
|
||||
break;
|
||||
@ -1216,7 +1214,6 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
|
||||
struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
|
||||
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
|
||||
struct rpc_rqst *rqst = rep->rr_rqst;
|
||||
unsigned long cwnd;
|
||||
int status;
|
||||
|
||||
xprt->reestablish_timeout = 0;
|
||||
@ -1238,15 +1235,10 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
|
||||
goto out_badheader;
|
||||
|
||||
out:
|
||||
spin_lock(&xprt->recv_lock);
|
||||
cwnd = xprt->cwnd;
|
||||
xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
|
||||
if (xprt->cwnd > cwnd)
|
||||
xprt_release_rqst_cong(rqst->rq_task);
|
||||
|
||||
spin_lock(&xprt->queue_lock);
|
||||
xprt_complete_rqst(rqst->rq_task, status);
|
||||
xprt_unpin_rqst(rqst);
|
||||
spin_unlock(&xprt->recv_lock);
|
||||
spin_unlock(&xprt->queue_lock);
|
||||
return;
|
||||
|
||||
/* If the incoming reply terminated a pending RPC, the next
|
||||
@ -1345,19 +1337,23 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
|
||||
/* Match incoming rpcrdma_rep to an rpcrdma_req to
|
||||
* get context for handling any incoming chunks.
|
||||
*/
|
||||
spin_lock(&xprt->recv_lock);
|
||||
spin_lock(&xprt->queue_lock);
|
||||
rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
|
||||
if (!rqst)
|
||||
goto out_norqst;
|
||||
xprt_pin_rqst(rqst);
|
||||
spin_unlock(&xprt->queue_lock);
|
||||
|
||||
if (credits == 0)
|
||||
credits = 1; /* don't deadlock */
|
||||
else if (credits > buf->rb_max_requests)
|
||||
credits = buf->rb_max_requests;
|
||||
buf->rb_credits = credits;
|
||||
|
||||
spin_unlock(&xprt->recv_lock);
|
||||
if (buf->rb_credits != credits) {
|
||||
spin_lock_bh(&xprt->transport_lock);
|
||||
buf->rb_credits = credits;
|
||||
xprt->cwnd = credits << RPC_CWNDSHIFT;
|
||||
spin_unlock_bh(&xprt->transport_lock);
|
||||
}
|
||||
|
||||
req = rpcr_to_rdmar(rqst);
|
||||
req->rl_reply = rep;
|
||||
@ -1378,7 +1374,7 @@ out_badversion:
|
||||
* is corrupt.
|
||||
*/
|
||||
out_norqst:
|
||||
spin_unlock(&xprt->recv_lock);
|
||||
spin_unlock(&xprt->queue_lock);
|
||||
trace_xprtrdma_reply_rqst(rep);
|
||||
goto repost;
|
||||
|
||||
|
@ -56,7 +56,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
|
||||
if (src->iov_len < 24)
|
||||
goto out_shortreply;
|
||||
|
||||
spin_lock(&xprt->recv_lock);
|
||||
spin_lock(&xprt->queue_lock);
|
||||
req = xprt_lookup_rqst(xprt, xid);
|
||||
if (!req)
|
||||
goto out_notfound;
|
||||
@ -86,7 +86,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
|
||||
rcvbuf->len = 0;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock(&xprt->recv_lock);
|
||||
spin_unlock(&xprt->queue_lock);
|
||||
out:
|
||||
return ret;
|
||||
|
||||
@ -215,9 +215,8 @@ drop_connection:
|
||||
* connection.
|
||||
*/
|
||||
static int
|
||||
xprt_rdma_bc_send_request(struct rpc_task *task)
|
||||
xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
|
||||
{
|
||||
struct rpc_rqst *rqst = task->tk_rqstp;
|
||||
struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
|
||||
struct svcxprt_rdma *rdma;
|
||||
int ret;
|
||||
@ -225,12 +224,7 @@ xprt_rdma_bc_send_request(struct rpc_task *task)
|
||||
dprintk("svcrdma: sending bc call with xid: %08x\n",
|
||||
be32_to_cpu(rqst->rq_xid));
|
||||
|
||||
if (!mutex_trylock(&sxprt->xpt_mutex)) {
|
||||
rpc_sleep_on(&sxprt->xpt_bc_pending, task, NULL);
|
||||
if (!mutex_trylock(&sxprt->xpt_mutex))
|
||||
return -EAGAIN;
|
||||
rpc_wake_up_queued_task(&sxprt->xpt_bc_pending, task);
|
||||
}
|
||||
mutex_lock(&sxprt->xpt_mutex);
|
||||
|
||||
ret = -ENOTCONN;
|
||||
rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
|
||||
@ -248,6 +242,7 @@ static void
|
||||
xprt_rdma_bc_close(struct rpc_xprt *xprt)
|
||||
{
|
||||
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
|
||||
xprt->cwnd = RPC_CWNDSHIFT;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -225,69 +225,59 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
rpcrdma_conn_func(struct rpcrdma_ep *ep)
|
||||
{
|
||||
schedule_delayed_work(&ep->rep_connect_worker, 0);
|
||||
}
|
||||
|
||||
void
|
||||
rpcrdma_connect_worker(struct work_struct *work)
|
||||
{
|
||||
struct rpcrdma_ep *ep =
|
||||
container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
|
||||
struct rpcrdma_xprt *r_xprt =
|
||||
container_of(ep, struct rpcrdma_xprt, rx_ep);
|
||||
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
|
||||
|
||||
spin_lock_bh(&xprt->transport_lock);
|
||||
if (ep->rep_connected > 0) {
|
||||
if (!xprt_test_and_set_connected(xprt))
|
||||
xprt_wake_pending_tasks(xprt, 0);
|
||||
} else {
|
||||
if (xprt_test_and_clear_connected(xprt))
|
||||
xprt_wake_pending_tasks(xprt, -ENOTCONN);
|
||||
}
|
||||
spin_unlock_bh(&xprt->transport_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* xprt_rdma_connect_worker - establish connection in the background
|
||||
* @work: worker thread context
|
||||
*
|
||||
* Requester holds the xprt's send lock to prevent activity on this
|
||||
* transport while a fresh connection is being established. RPC tasks
|
||||
* sleep on the xprt's pending queue waiting for connect to complete.
|
||||
*/
|
||||
static void
|
||||
xprt_rdma_connect_worker(struct work_struct *work)
|
||||
{
|
||||
struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
|
||||
rx_connect_worker.work);
|
||||
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
|
||||
int rc = 0;
|
||||
|
||||
xprt_clear_connected(xprt);
|
||||
int rc;
|
||||
|
||||
rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
|
||||
if (rc)
|
||||
xprt_wake_pending_tasks(xprt, rc);
|
||||
|
||||
xprt_clear_connecting(xprt);
|
||||
if (r_xprt->rx_ep.rep_connected > 0) {
|
||||
if (!xprt_test_and_set_connected(xprt)) {
|
||||
xprt->stat.connect_count++;
|
||||
xprt->stat.connect_time += (long)jiffies -
|
||||
xprt->stat.connect_start;
|
||||
xprt_wake_pending_tasks(xprt, -EAGAIN);
|
||||
}
|
||||
} else {
|
||||
if (xprt_test_and_clear_connected(xprt))
|
||||
xprt_wake_pending_tasks(xprt, rc);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* xprt_rdma_inject_disconnect - inject a connection fault
|
||||
* @xprt: transport context
|
||||
*
|
||||
* If @xprt is connected, disconnect it to simulate spurious connection
|
||||
* loss.
|
||||
*/
|
||||
static void
|
||||
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
|
||||
{
|
||||
struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
|
||||
rx_xprt);
|
||||
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
|
||||
|
||||
trace_xprtrdma_inject_dsc(r_xprt);
|
||||
rdma_disconnect(r_xprt->rx_ia.ri_id);
|
||||
}
|
||||
|
||||
/*
|
||||
* xprt_rdma_destroy
|
||||
/**
|
||||
* xprt_rdma_destroy - Full tear down of transport
|
||||
* @xprt: doomed transport context
|
||||
*
|
||||
* Destroy the xprt.
|
||||
* Free all memory associated with the object, including its own.
|
||||
* NOTE: none of the *destroy methods free memory for their top-level
|
||||
* objects, even though they may have allocated it (they do free
|
||||
* private memory). It's up to the caller to handle it. In this
|
||||
* case (RDMA transport), all structure memory is inlined with the
|
||||
* struct rpcrdma_xprt.
|
||||
* Caller guarantees there will be no more calls to us with
|
||||
* this @xprt.
|
||||
*/
|
||||
static void
|
||||
xprt_rdma_destroy(struct rpc_xprt *xprt)
|
||||
@ -298,8 +288,6 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
|
||||
|
||||
cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
|
||||
|
||||
xprt_clear_connected(xprt);
|
||||
|
||||
rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
|
||||
rpcrdma_buffer_destroy(&r_xprt->rx_buf);
|
||||
rpcrdma_ia_close(&r_xprt->rx_ia);
|
||||
@ -442,11 +430,12 @@ out1:
|
||||
}
|
||||
|
||||
/**
|
||||
* xprt_rdma_close - Close down RDMA connection
|
||||
* @xprt: generic transport to be closed
|
||||
* xprt_rdma_close - close a transport connection
|
||||
* @xprt: transport context
|
||||
*
|
||||
* Called during transport shutdown reconnect, or device
|
||||
* removal. Caller holds the transport's write lock.
|
||||
* Called during transport shutdown, reconnect, or device removal.
|
||||
* Caller holds @xprt's send lock to prevent activity on this
|
||||
* transport while the connection is torn down.
|
||||
*/
|
||||
static void
|
||||
xprt_rdma_close(struct rpc_xprt *xprt)
|
||||
@ -468,6 +457,12 @@ xprt_rdma_close(struct rpc_xprt *xprt)
|
||||
xprt->reestablish_timeout = 0;
|
||||
xprt_disconnect_done(xprt);
|
||||
rpcrdma_ep_disconnect(ep, ia);
|
||||
|
||||
/* Prepare @xprt for the next connection by reinitializing
|
||||
* its credit grant to one (see RFC 8166, Section 3.3.3).
|
||||
*/
|
||||
r_xprt->rx_buf.rb_credits = 1;
|
||||
xprt->cwnd = RPC_CWNDSHIFT;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -519,6 +514,12 @@ xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
|
||||
xprt_force_disconnect(xprt);
|
||||
}
|
||||
|
||||
/**
|
||||
* xprt_rdma_connect - try to establish a transport connection
|
||||
* @xprt: transport state
|
||||
* @task: RPC scheduler context
|
||||
*
|
||||
*/
|
||||
static void
|
||||
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
|
||||
{
|
||||
@ -638,13 +639,6 @@ rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
|
||||
* 0: Success; rq_buffer points to RPC buffer to use
|
||||
* ENOMEM: Out of memory, call again later
|
||||
* EIO: A permanent error occurred, do not retry
|
||||
*
|
||||
* The RDMA allocate/free functions need the task structure as a place
|
||||
* to hide the struct rpcrdma_req, which is necessary for the actual
|
||||
* send/recv sequence.
|
||||
*
|
||||
* xprt_rdma_allocate provides buffers that are already mapped for
|
||||
* DMA, and a local DMA lkey is provided for each.
|
||||
*/
|
||||
static int
|
||||
xprt_rdma_allocate(struct rpc_task *task)
|
||||
@ -693,7 +687,7 @@ xprt_rdma_free(struct rpc_task *task)
|
||||
|
||||
/**
|
||||
* xprt_rdma_send_request - marshal and send an RPC request
|
||||
* @task: RPC task with an RPC message in rq_snd_buf
|
||||
* @rqst: RPC message in rq_snd_buf
|
||||
*
|
||||
* Caller holds the transport's write lock.
|
||||
*
|
||||
@ -706,9 +700,8 @@ xprt_rdma_free(struct rpc_task *task)
|
||||
* sent. Do not try to send this message again.
|
||||
*/
|
||||
static int
|
||||
xprt_rdma_send_request(struct rpc_task *task)
|
||||
xprt_rdma_send_request(struct rpc_rqst *rqst)
|
||||
{
|
||||
struct rpc_rqst *rqst = task->tk_rqstp;
|
||||
struct rpc_xprt *xprt = rqst->rq_xprt;
|
||||
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
|
||||
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
|
||||
@ -722,6 +715,9 @@ xprt_rdma_send_request(struct rpc_task *task)
|
||||
if (!xprt_connected(xprt))
|
||||
goto drop_connection;
|
||||
|
||||
if (!xprt_request_get_cong(xprt, rqst))
|
||||
return -EBADSLT;
|
||||
|
||||
rc = rpcrdma_marshal_req(r_xprt, rqst);
|
||||
if (rc < 0)
|
||||
goto failed_marshal;
|
||||
@ -741,7 +737,7 @@ xprt_rdma_send_request(struct rpc_task *task)
|
||||
/* An RPC with no reply will throw off credit accounting,
|
||||
* so drop the connection to reset the credit grant.
|
||||
*/
|
||||
if (!rpc_reply_expected(task))
|
||||
if (!rpc_reply_expected(rqst->rq_task))
|
||||
goto drop_connection;
|
||||
return 0;
|
||||
|
||||
@ -766,7 +762,7 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
|
||||
0, /* need a local port? */
|
||||
xprt->stat.bind_count,
|
||||
xprt->stat.connect_count,
|
||||
xprt->stat.connect_time,
|
||||
xprt->stat.connect_time / HZ,
|
||||
idle_time,
|
||||
xprt->stat.sends,
|
||||
xprt->stat.recvs,
|
||||
@ -786,7 +782,7 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
|
||||
r_xprt->rx_stats.bad_reply_count,
|
||||
r_xprt->rx_stats.nomsg_call_count);
|
||||
seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
|
||||
r_xprt->rx_stats.mrs_recovered,
|
||||
r_xprt->rx_stats.mrs_recycled,
|
||||
r_xprt->rx_stats.mrs_orphaned,
|
||||
r_xprt->rx_stats.mrs_allocated,
|
||||
r_xprt->rx_stats.local_inv_needed,
|
||||
|
@ -108,20 +108,48 @@ rpcrdma_destroy_wq(void)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* rpcrdma_disconnect_worker - Force a disconnect
|
||||
* @work: endpoint to be disconnected
|
||||
*
|
||||
* Provider callbacks can possibly run in an IRQ context. This function
|
||||
* is invoked in a worker thread to guarantee that disconnect wake-up
|
||||
* calls are always done in process context.
|
||||
*/
|
||||
static void
|
||||
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
|
||||
rpcrdma_disconnect_worker(struct work_struct *work)
|
||||
{
|
||||
struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
|
||||
rep_disconnect_worker.work);
|
||||
struct rpcrdma_xprt *r_xprt =
|
||||
container_of(ep, struct rpcrdma_xprt, rx_ep);
|
||||
|
||||
xprt_force_disconnect(&r_xprt->rx_xprt);
|
||||
}
|
||||
|
||||
/**
|
||||
* rpcrdma_qp_event_handler - Handle one QP event (error notification)
|
||||
* @event: details of the event
|
||||
* @context: ep that owns QP where event occurred
|
||||
*
|
||||
* Called from the RDMA provider (device driver) possibly in an interrupt
|
||||
* context.
|
||||
*/
|
||||
static void
|
||||
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
|
||||
{
|
||||
struct rpcrdma_ep *ep = context;
|
||||
struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
|
||||
rx_ep);
|
||||
|
||||
trace_xprtrdma_qp_error(r_xprt, event);
|
||||
pr_err("rpcrdma: %s on device %s ep %p\n",
|
||||
ib_event_msg(event->event), event->device->name, context);
|
||||
trace_xprtrdma_qp_event(r_xprt, event);
|
||||
pr_err("rpcrdma: %s on device %s connected to %s:%s\n",
|
||||
ib_event_msg(event->event), event->device->name,
|
||||
rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
|
||||
|
||||
if (ep->rep_connected == 1) {
|
||||
ep->rep_connected = -EIO;
|
||||
rpcrdma_conn_func(ep);
|
||||
schedule_delayed_work(&ep->rep_disconnect_worker, 0);
|
||||
wake_up_all(&ep->rep_connect_wait);
|
||||
}
|
||||
}
|
||||
@ -219,38 +247,48 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
|
||||
rpcrdma_set_max_header_sizes(r_xprt);
|
||||
}
|
||||
|
||||
/**
|
||||
* rpcrdma_cm_event_handler - Handle RDMA CM events
|
||||
* @id: rdma_cm_id on which an event has occurred
|
||||
* @event: details of the event
|
||||
*
|
||||
* Called with @id's mutex held. Returns 1 if caller should
|
||||
* destroy @id, otherwise 0.
|
||||
*/
|
||||
static int
|
||||
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
|
||||
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
|
||||
{
|
||||
struct rpcrdma_xprt *xprt = id->context;
|
||||
struct rpcrdma_ia *ia = &xprt->rx_ia;
|
||||
struct rpcrdma_ep *ep = &xprt->rx_ep;
|
||||
int connstate = 0;
|
||||
struct rpcrdma_xprt *r_xprt = id->context;
|
||||
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
|
||||
struct rpcrdma_ep *ep = &r_xprt->rx_ep;
|
||||
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
|
||||
|
||||
trace_xprtrdma_conn_upcall(xprt, event);
|
||||
might_sleep();
|
||||
|
||||
trace_xprtrdma_cm_event(r_xprt, event);
|
||||
switch (event->event) {
|
||||
case RDMA_CM_EVENT_ADDR_RESOLVED:
|
||||
case RDMA_CM_EVENT_ROUTE_RESOLVED:
|
||||
ia->ri_async_rc = 0;
|
||||
complete(&ia->ri_done);
|
||||
break;
|
||||
return 0;
|
||||
case RDMA_CM_EVENT_ADDR_ERROR:
|
||||
ia->ri_async_rc = -EPROTO;
|
||||
complete(&ia->ri_done);
|
||||
break;
|
||||
return 0;
|
||||
case RDMA_CM_EVENT_ROUTE_ERROR:
|
||||
ia->ri_async_rc = -ENETUNREACH;
|
||||
complete(&ia->ri_done);
|
||||
break;
|
||||
return 0;
|
||||
case RDMA_CM_EVENT_DEVICE_REMOVAL:
|
||||
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
|
||||
pr_info("rpcrdma: removing device %s for %s:%s\n",
|
||||
ia->ri_device->name,
|
||||
rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
|
||||
rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
|
||||
#endif
|
||||
set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
|
||||
ep->rep_connected = -ENODEV;
|
||||
xprt_force_disconnect(&xprt->rx_xprt);
|
||||
xprt_force_disconnect(xprt);
|
||||
wait_for_completion(&ia->ri_remove_done);
|
||||
|
||||
ia->ri_id = NULL;
|
||||
@ -258,41 +296,40 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
|
||||
/* Return 1 to ensure the core destroys the id. */
|
||||
return 1;
|
||||
case RDMA_CM_EVENT_ESTABLISHED:
|
||||
++xprt->rx_xprt.connect_cookie;
|
||||
connstate = 1;
|
||||
rpcrdma_update_connect_private(xprt, &event->param.conn);
|
||||
goto connected;
|
||||
++xprt->connect_cookie;
|
||||
ep->rep_connected = 1;
|
||||
rpcrdma_update_connect_private(r_xprt, &event->param.conn);
|
||||
wake_up_all(&ep->rep_connect_wait);
|
||||
break;
|
||||
case RDMA_CM_EVENT_CONNECT_ERROR:
|
||||
connstate = -ENOTCONN;
|
||||
goto connected;
|
||||
ep->rep_connected = -ENOTCONN;
|
||||
goto disconnected;
|
||||
case RDMA_CM_EVENT_UNREACHABLE:
|
||||
connstate = -ENETUNREACH;
|
||||
goto connected;
|
||||
ep->rep_connected = -ENETUNREACH;
|
||||
goto disconnected;
|
||||
case RDMA_CM_EVENT_REJECTED:
|
||||
dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
|
||||
rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
|
||||
rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
|
||||
rdma_reject_msg(id, event->status));
|
||||
connstate = -ECONNREFUSED;
|
||||
ep->rep_connected = -ECONNREFUSED;
|
||||
if (event->status == IB_CM_REJ_STALE_CONN)
|
||||
connstate = -EAGAIN;
|
||||
goto connected;
|
||||
ep->rep_connected = -EAGAIN;
|
||||
goto disconnected;
|
||||
case RDMA_CM_EVENT_DISCONNECTED:
|
||||
++xprt->rx_xprt.connect_cookie;
|
||||
connstate = -ECONNABORTED;
|
||||
connected:
|
||||
ep->rep_connected = connstate;
|
||||
rpcrdma_conn_func(ep);
|
||||
++xprt->connect_cookie;
|
||||
ep->rep_connected = -ECONNABORTED;
|
||||
disconnected:
|
||||
xprt_force_disconnect(xprt);
|
||||
wake_up_all(&ep->rep_connect_wait);
|
||||
/*FALLTHROUGH*/
|
||||
break;
|
||||
default:
|
||||
dprintk("RPC: %s: %s:%s on %s/%s (ep 0x%p): %s\n",
|
||||
__func__,
|
||||
rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
|
||||
ia->ri_device->name, ia->ri_ops->ro_displayname,
|
||||
ep, rdma_event_msg(event->event));
|
||||
break;
|
||||
}
|
||||
|
||||
dprintk("RPC: %s: %s:%s on %s/%s: %s\n", __func__,
|
||||
rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
|
||||
ia->ri_device->name, ia->ri_ops->ro_displayname,
|
||||
rdma_event_msg(event->event));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -308,7 +345,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
|
||||
init_completion(&ia->ri_done);
|
||||
init_completion(&ia->ri_remove_done);
|
||||
|
||||
id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_conn_upcall,
|
||||
id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
|
||||
xprt, RDMA_PS_TCP, IB_QPT_RC);
|
||||
if (IS_ERR(id)) {
|
||||
rc = PTR_ERR(id);
|
||||
@ -519,7 +556,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
|
||||
ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
|
||||
ep->rep_attr.qp_context = ep;
|
||||
ep->rep_attr.srq = NULL;
|
||||
ep->rep_attr.cap.max_send_sge = max_sge;
|
||||
@ -542,7 +579,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
|
||||
cdata->max_requests >> 2);
|
||||
ep->rep_send_count = ep->rep_send_batch;
|
||||
init_waitqueue_head(&ep->rep_connect_wait);
|
||||
INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
|
||||
INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
|
||||
rpcrdma_disconnect_worker);
|
||||
|
||||
sendcq = ib_alloc_cq(ia->ri_device, NULL,
|
||||
ep->rep_attr.cap.max_send_wr + 1,
|
||||
@ -615,7 +653,7 @@ out1:
|
||||
void
|
||||
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
|
||||
{
|
||||
cancel_delayed_work_sync(&ep->rep_connect_worker);
|
||||
cancel_delayed_work_sync(&ep->rep_disconnect_worker);
|
||||
|
||||
if (ia->ri_id && ia->ri_id->qp) {
|
||||
rpcrdma_ep_disconnect(ep, ia);
|
||||
@ -728,6 +766,7 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
|
||||
{
|
||||
struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
|
||||
rx_ia);
|
||||
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
|
||||
int rc;
|
||||
|
||||
retry:
|
||||
@ -754,6 +793,8 @@ retry:
|
||||
}
|
||||
|
||||
ep->rep_connected = 0;
|
||||
xprt_clear_connected(xprt);
|
||||
|
||||
rpcrdma_post_recvs(r_xprt, true);
|
||||
|
||||
rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
|
||||
@ -877,7 +918,6 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
|
||||
sc->sc_xprt = r_xprt;
|
||||
buf->rb_sc_ctxs[i] = sc;
|
||||
}
|
||||
buf->rb_flags = 0;
|
||||
|
||||
return 0;
|
||||
|
||||
@ -977,39 +1017,6 @@ rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
rpcrdma_mr_recovery_worker(struct work_struct *work)
|
||||
{
|
||||
struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
|
||||
rb_recovery_worker.work);
|
||||
struct rpcrdma_mr *mr;
|
||||
|
||||
spin_lock(&buf->rb_recovery_lock);
|
||||
while (!list_empty(&buf->rb_stale_mrs)) {
|
||||
mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
|
||||
spin_unlock(&buf->rb_recovery_lock);
|
||||
|
||||
trace_xprtrdma_recover_mr(mr);
|
||||
mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);
|
||||
|
||||
spin_lock(&buf->rb_recovery_lock);
|
||||
}
|
||||
spin_unlock(&buf->rb_recovery_lock);
|
||||
}
|
||||
|
||||
void
|
||||
rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
|
||||
{
|
||||
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
|
||||
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
|
||||
|
||||
spin_lock(&buf->rb_recovery_lock);
|
||||
rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
|
||||
spin_unlock(&buf->rb_recovery_lock);
|
||||
|
||||
schedule_delayed_work(&buf->rb_recovery_worker, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
|
||||
{
|
||||
@@ -1019,7 +1026,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 3; count++) {
	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

@@ -1138,18 +1145,15 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_flags = 0;
	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_mrs_create(r_xprt);

@@ -1233,7 +1237,6 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);
@@ -1326,7 +1329,7 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_dma_unmap(mr);
	trace_xprtrdma_mr_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
@@ -1518,9 +1521,11 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
	struct ib_recv_wr *wr, *bad_wr;
	int needed, count, rc;

	rc = 0;
	count = 0;
	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (buf->rb_posted_receives > needed)
		return;
		goto out;
	needed -= buf->rb_posted_receives;

	count = 0;
@@ -1556,7 +1561,7 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
		--needed;
	}
	if (!count)
		return;
		goto out;

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
@@ -1570,5 +1575,6 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
		}
	}
	buf->rb_posted_receives += count;
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
}
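
Taken together, the rpcrdma_post_recvs() hunks above converge the early returns on a single exit label so the tracepoint reports every call, including the ones that post nothing. A minimal sketch of the resulting control flow, assuming only the fields already visible in this diff (the WR-building middle of the real function is elided):

	static void
	sketch_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
	{
		struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
		int needed, count, rc;

		rc = 0;
		count = 0;
		needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
		if (buf->rb_posted_receives > needed)
			goto out;		/* was: return; */

		/* ... allocate reps and chain Receive WRs, bumping count ... */
		if (!count)
			goto out;		/* was: return; */

		/* ... ib_post_recv() runs here and may set rc ... */
		buf->rb_posted_receives += count;
	out:
		trace_xprtrdma_post_recvs(r_xprt, count, rc);
	}

The hunks that follow are the matching structure and prototype changes in the transport's private header.
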
@@ -101,7 +101,7 @@ struct rpcrdma_ep {
	wait_queue_head_t	rep_connect_wait;
	struct rpcrdma_connect_private	rep_cm_private;
	struct rdma_conn_param	rep_remote_cma;
	struct delayed_work	rep_connect_worker;
	struct delayed_work	rep_disconnect_worker;
};

/* Pre-allocate extra Work Requests for handling backward receives
@@ -280,6 +280,7 @@ struct rpcrdma_mr {
	u32			mr_handle;
	u32			mr_length;
	u64			mr_offset;
	struct work_struct	mr_recycle;
	struct list_head	mr_all;
};

@@ -411,9 +412,6 @@ struct rpcrdma_buffer {

	u32			rb_bc_max_requests;

	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
	struct list_head	rb_stale_mrs;
	struct delayed_work	rb_recovery_worker;
	struct delayed_work	rb_refresh_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
@@ -452,7 +450,7 @@ struct rpcrdma_stats {
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		mrs_recovered;
	unsigned long		mrs_recycled;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		empty_sendctx_q;
@@ -481,7 +479,6 @@ struct rpcrdma_memreg_ops {
					  struct list_head *mrs);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct list_head *);
	void		(*ro_recover_mr)(struct rpcrdma_mr *mr);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
@@ -559,7 +556,6 @@ int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_conn_func(struct rpcrdma_ep *ep);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
@@ -578,7 +574,12 @@ struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mr_put(struct rpcrdma_mr *mr);
void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);

static inline void
rpcrdma_mr_recycle(struct rpcrdma_mr *mr)
{
	schedule_work(&mr->mr_recycle);
}

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
@@ -652,7 +653,6 @@ static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void rpcrdma_connect_worker(struct work_struct *work);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);
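
The new rep_disconnect_worker in struct rpcrdma_ep pairs with the INIT_DELAYED_WORK() and cancel_delayed_work_sync() calls shown earlier. A minimal sketch of the idea, assuming the worker only moves disconnect handling into process context and that the ep is embedded in rpcrdma_xprt as rx_ep; the body is illustrative, not the merged implementation:

	static void
	rpcrdma_disconnect_worker(struct work_struct *work)
	{
		struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
						     rep_disconnect_worker.work);
		struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
							   rx_ep);

		/* Illustrative only: let the RPC layer see the lost connection. */
		xprt_force_disconnect(&r_xprt->rx_xprt);
	}

	/* A CM event handler could then defer the work with, e.g.:
	 *	schedule_delayed_work(&ep->rep_disconnect_worker, 0);
	 */

Deferring to a workqueue avoids doing transport wake-ups directly from the RDMA CM event handler's context.
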