NFS client bugfixes for Linux 5.13

Merge tag 'nfs-for-5.13-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client bugfixes from Trond Myklebust:
"Stable fixes:
   - Fix v4.0/v4.1 SEEK_DATA return -ENOTSUPP when set NFS_V4_2 config
   - Fix Oops in xs_tcp_send_request() when transport is disconnected
   - Fix a NULL pointer dereference in pnfs_mark_matching_lsegs_return()

  Bugfixes:
   - Fix instances where signal_pending() should be fatal_signal_pending()
   - fix an incorrect limit in filelayout_decode_layout()
   - Fixes for the SUNRPC backlogged RPC queue
   - Don't corrupt the value of pg_bytes_written in nfs_do_recoalesce()
   - Revert commit 586a0787ce ("Clean up rpcrdma_prepare_readch()")"

* tag 'nfs-for-5.13-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  nfs: Remove trailing semicolon in macros
  xprtrdma: Revert 586a0787ce
  NFSv4: Fix v4.0/v4.1 SEEK_DATA return -ENOTSUPP when set NFS_V4_2 config
  NFS: Clean up reset of the mirror accounting variables
  NFS: Don't corrupt the value of pg_bytes_written in nfs_do_recoalesce()
  NFS: Fix an Oopsable condition in __nfs_pageio_add_request()
  SUNRPC: More fixes for backlog congestion
  SUNRPC: Fix Oops in xs_tcp_send_request() when transport is disconnected
  NFSv4: Fix a NULL pointer dereference in pnfs_mark_matching_lsegs_return()
  SUNRPC in case of backlog, hand free slots directly to waiting task
  pNFS/NFSv4: Remove redundant initialization of 'rd_size'
  NFS: fix an incorrect limit in filelayout_decode_layout()
  fs/nfs: Use fatal_signal_pending instead of signal_pending
commit 5ff2756afd
Linus Torvalds, 2021-05-28 08:53:19 -10:00
15 changed files with 98 additions and 64 deletions

@@ -718,7 +718,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                 if (unlikely(!p))
                         goto out_err;
                 fl->fh_array[i]->size = be32_to_cpup(p++);
-                if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+                if (fl->fh_array[i]->size > NFS_MAXFHSIZE) {
                         printk(KERN_ERR "NFS: Too big fh %d received %d\n",
                                i, fl->fh_array[i]->size);
                         goto out_err;

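For context on the hunk above: sizeof(struct nfs_fh) also counts the structure's size field, so it is a couple of bytes larger than the data array it is meant to bound, and an oversized file handle could slip past the old check. A minimal userspace sketch (a stand-in struct with the same shape, not the kernel's definition) shows the difference between the two bounds:

#include <stdio.h>

#define NFS_MAXFHSIZE 128       /* same limit the kernel uses */

/* Stand-in with the same shape as struct nfs_fh: a length field
 * followed by a fixed-size data buffer. */
struct fh {
        unsigned short size;
        unsigned char  data[NFS_MAXFHSIZE];
};

int main(void)
{
        unsigned int decoded = NFS_MAXFHSIZE + 1;       /* oversized on-the-wire value */

        printf("sizeof(struct fh) = %zu\n", sizeof(struct fh));
        /* Old bound: includes the 'size' member, so 129 slips through. */
        printf("old check rejects %u? %s\n", decoded,
               sizeof(struct fh) < decoded ? "yes" : "no");
        /* New bound: the capacity of the data buffer itself. */
        printf("new check rejects %u? %s\n", decoded,
               decoded > NFS_MAXFHSIZE ? "yes" : "no");
        return 0;
}
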
@@ -362,7 +362,7 @@ static const struct kernel_param_ops param_ops_nfs_timeout = {
         .set = param_set_nfs_timeout,
         .get = param_get_nfs_timeout,
 };
-#define param_check_nfs_timeout(name, p) __param_check(name, p, int);
+#define param_check_nfs_timeout(name, p) __param_check(name, p, int)
 
 module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644);
 MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout,

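An aside on why the trailing semicolon matters: the macro's users supply their own punctuation, so a semicolon baked into the #define leaves a stray empty statement or declaration behind each use, which newer compilers warn about. A small sketch with made-up macros (not the kernel's param_check helpers) shows the usual failure mode of statement-like macros that carry their own ';':

#include <stdio.h>

/* Made-up macros for illustration; only the second form is safe. */
#define log_bad(msg)    printf("%s\n", msg);    /* trailing ';' baked in */
#define log_good(msg)   printf("%s\n", msg)     /* caller adds the ';' */

int main(void)
{
        int verbose = 0;

        /* With log_bad() the familiar pattern below would not compile:
         * it expands to "if (verbose) printf(...); ; else ..." and the
         * stray ';' orphans the else. */
        if (verbose)
                log_good("hello");
        else
                log_good("quiet");
        return 0;
}
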
@@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
         case SEEK_HOLE:
         case SEEK_DATA:
                 ret = nfs42_proc_llseek(filep, offset, whence);
-                if (ret != -ENOTSUPP)
+                if (ret != -EOPNOTSUPP)
                         return ret;
                 fallthrough;
         default:

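For context: -ENOTSUPP is a kernel-internal value, and the unsupported-operation result actually seen here is -EOPNOTSUPP, so matching on that is what lets SEEK_DATA/SEEK_HOLE drop through to the default: branch above. A rough userspace sketch of that fallback shape, with stub functions standing in for nfs42_proc_llseek() and the generic path:

#include <errno.h>
#include <stdio.h>

/* Stubs for illustration only. */
static long llseek_v42(long offset)
{
        (void)offset;
        return -EOPNOTSUPP;     /* e.g. SEEK_DATA not supported on this mount */
}

static long llseek_generic(long offset)
{
        return offset;
}

static long do_llseek(long offset)
{
        long ret = llseek_v42(offset);

        /* Only "operation not supported" falls through to the generic
         * path; success or any other error is final. */
        if (ret != -EOPNOTSUPP)
                return ret;
        return llseek_generic(offset);
}

int main(void)
{
        printf("llseek -> %ld\n", do_llseek(4096));
        return 0;
}
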
@@ -1706,7 +1706,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
                 rcu_read_unlock();
                 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
 
-                if (!signal_pending(current)) {
+                if (!fatal_signal_pending(current)) {
                         if (schedule_timeout(5*HZ) == 0)
                                 status = -EAGAIN;
                         else
@@ -3487,7 +3487,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
                 write_sequnlock(&state->seqlock);
                 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
 
-                if (signal_pending(current))
+                if (fatal_signal_pending(current))
                         status = -EINTR;
                 else
                         if (schedule_timeout(5*HZ) != 0)

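For context on the two hunks above: the wait used to abort on any pending signal, so an ordinary, non-fatal signal could turn a recoverable stateid wait into an error. A toy userspace model of the distinction, with plain booleans standing in for signal_pending()/fatal_signal_pending():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model, not kernel code. */
static int wait_for_stateid_update(bool signal_pending, bool fatal_signal_pending)
{
        (void)signal_pending;           /* ignored after the fix; that is the point */

        if (fatal_signal_pending)       /* was: signal_pending */
                return -EINTR;
        /* ...otherwise sleep up to 5s and re-check, as the real loop does... */
        return -EAGAIN;
}

int main(void)
{
        printf("ordinary signal pending -> %d\n", wait_for_stateid_update(true, false));
        printf("fatal signal pending    -> %d\n", wait_for_stateid_update(true, true));
        return 0;
}
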
@@ -1094,15 +1094,16 @@ nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
         struct nfs_page *prev = NULL;
         unsigned int size;
 
-        if (mirror->pg_count != 0) {
-                prev = nfs_list_entry(mirror->pg_list.prev);
-        } else {
+        if (list_empty(&mirror->pg_list)) {
                 if (desc->pg_ops->pg_init)
                         desc->pg_ops->pg_init(desc, req);
                 if (desc->pg_error < 0)
                         return 0;
                 mirror->pg_base = req->wb_pgbase;
-        }
+                mirror->pg_count = 0;
+                mirror->pg_recoalesce = 0;
+        } else
+                prev = nfs_list_entry(mirror->pg_list.prev);
 
         if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
                 if (NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR)
@@ -1127,18 +1128,13 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
 {
         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
 
-
         if (!list_empty(&mirror->pg_list)) {
                 int error = desc->pg_ops->pg_doio(desc);
                 if (error < 0)
                         desc->pg_error = error;
-                else
+                else if (list_empty(&mirror->pg_list))
                         mirror->pg_bytes_written += mirror->pg_count;
         }
-        if (list_empty(&mirror->pg_list)) {
-                mirror->pg_count = 0;
-                mirror->pg_base = 0;
-        }
 }
 
 static void
@@ -1227,10 +1223,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
 
         do {
                 list_splice_init(&mirror->pg_list, &head);
-                mirror->pg_bytes_written -= mirror->pg_count;
-                mirror->pg_count = 0;
-                mirror->pg_base = 0;
-                mirror->pg_recoalesce = 0;
 
                 while (!list_empty(&head)) {
                         struct nfs_page *req;

@@ -1317,6 +1317,11 @@ _pnfs_return_layout(struct inode *ino)
 {
         struct pnfs_layout_hdr *lo = NULL;
         struct nfs_inode *nfsi = NFS_I(ino);
+        struct pnfs_layout_range range = {
+                .iomode = IOMODE_ANY,
+                .offset = 0,
+                .length = NFS4_MAX_UINT64,
+        };
         LIST_HEAD(tmp_list);
         const struct cred *cred;
         nfs4_stateid stateid;
@@ -1344,16 +1349,10 @@ _pnfs_return_layout(struct inode *ino)
         }
         valid_layout = pnfs_layout_is_valid(lo);
         pnfs_clear_layoutcommit(ino, &tmp_list);
-        pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
+        pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0);
 
-        if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
-                struct pnfs_layout_range range = {
-                        .iomode = IOMODE_ANY,
-                        .offset = 0,
-                        .length = NFS4_MAX_UINT64,
-                };
+        if (NFS_SERVER(ino)->pnfs_curr_ld->return_range)
                 NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
-        }
 
         /* Don't send a LAYOUTRETURN if list was initially empty */
         if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
@@ -2678,7 +2677,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
-        u64 rd_size = req->wb_bytes;
+        u64 rd_size;
 
         pnfs_generic_pg_check_layout(pgio);
         pnfs_generic_pg_check_range(pgio, req);

@@ -1379,7 +1379,7 @@ static const struct kernel_param_ops param_ops_portnr = {
         .set = param_set_portnr,
         .get = param_get_uint,
 };
-#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int)
 
 module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
 module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644);

@@ -368,6 +368,8 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
                                 unsigned int num_prealloc,
                                 unsigned int max_req);
 void            xprt_free(struct rpc_xprt *);
+void            xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool            xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
 
 static inline int
 xprt_enable_swap(struct rpc_xprt *xprt)

@@ -1677,13 +1677,6 @@ call_reserveresult(struct rpc_task *task)
                 return;
         }
 
-        /*
-         * Even though there was an error, we may have acquired
-         * a request slot somehow. Make sure not to leak it.
-         */
-        if (task->tk_rqstp)
-                xprt_release(task);
-
         switch (status) {
         case -ENOMEM:
                 rpc_delay(task, HZ >> 2);

@@ -70,6 +70,7 @@
 static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32   xprt_alloc_xid(struct rpc_xprt *xprt);
 static void     xprt_destroy(struct rpc_xprt *xprt);
+static void     xprt_request_init(struct rpc_task *task);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -1606,17 +1607,40 @@ xprt_transmit(struct rpc_task *task)
         spin_unlock(&xprt->queue_lock);
 }
 
-static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+static void xprt_complete_request_init(struct rpc_task *task)
+{
+        if (task->tk_rqstp)
+                xprt_request_init(task);
+}
+
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
 {
         set_bit(XPRT_CONGESTED, &xprt->state);
-        rpc_sleep_on(&xprt->backlog, task, NULL);
+        rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
 }
+EXPORT_SYMBOL_GPL(xprt_add_backlog);
 
-static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+static bool __xprt_set_rq(struct rpc_task *task, void *data)
 {
-        if (rpc_wake_up_next(&xprt->backlog) == NULL)
+        struct rpc_rqst *req = data;
+
+        if (task->tk_rqstp == NULL) {
+                memset(req, 0, sizeof(*req));   /* mark unused */
+                task->tk_rqstp = req;
+                return true;
+        }
+        return false;
+}
+
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+        if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
                 clear_bit(XPRT_CONGESTED, &xprt->state);
+                return false;
+        }
+        return true;
 }
+EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);
 
 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
 {
@@ -1626,7 +1650,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task
                 goto out;
         spin_lock(&xprt->reserve_lock);
         if (test_bit(XPRT_CONGESTED, &xprt->state)) {
-                rpc_sleep_on(&xprt->backlog, task, NULL);
+                xprt_add_backlog(xprt, task);
                 ret = true;
         }
         spin_unlock(&xprt->reserve_lock);
@@ -1703,11 +1727,11 @@ EXPORT_SYMBOL_GPL(xprt_alloc_slot);
 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
         spin_lock(&xprt->reserve_lock);
-        if (!xprt_dynamic_free_slot(xprt, req)) {
+        if (!xprt_wake_up_backlog(xprt, req) &&
+            !xprt_dynamic_free_slot(xprt, req)) {
                 memset(req, 0, sizeof(*req));   /* mark unused */
                 list_add(&req->rq_list, &xprt->free);
         }
-        xprt_wake_up_backlog(xprt);
         spin_unlock(&xprt->reserve_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_free_slot);
@@ -1894,10 +1918,10 @@ void xprt_release(struct rpc_task *task)
         xdr_free_bvec(&req->rq_snd_buf);
         if (req->rq_cred != NULL)
                 put_rpccred(req->rq_cred);
-        task->tk_rqstp = NULL;
         if (req->rq_release_snd_buf)
                 req->rq_release_snd_buf(req);
+        task->tk_rqstp = NULL;
 
         if (likely(!bc_prealloc(req)))
                 xprt->ops->free_slot(xprt, req);
         else

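The xprt.c hunks above are the "hand free slots directly to waiting task" change from the shortlog: rather than putting a freed rqst back on the free list and waking a backlogged task to come and claim one (a window in which another task can race in first), the freed request is attached to the waiter as it is woken. A toy userspace model of that hand-off, with invented names and no locking:

#include <stdbool.h>
#include <stdio.h>

struct slot { int id; bool in_use; };
struct waiter { const char *name; struct slot *slot; };

#define MAX_WAITERS 4
static struct waiter *backlog[MAX_WAITERS];
static int backlog_len;

static void backlog_add(struct waiter *w)
{
        if (backlog_len < MAX_WAITERS)
                backlog[backlog_len++] = w;
}

/* On free, hand the slot straight to the first waiter, if any. */
static bool wake_up_backlog(struct slot *s)
{
        int i;

        if (backlog_len == 0)
                return false;           /* nobody waiting: caller keeps the slot */
        backlog[0]->slot = s;           /* direct hand-off, no trip via the free list */
        for (i = 1; i < backlog_len; i++)
                backlog[i - 1] = backlog[i];
        backlog_len--;
        return true;
}

static void free_slot(struct slot *s)
{
        if (!wake_up_backlog(s))
                s->in_use = false;      /* only now does it return to the pool */
}

int main(void)
{
        struct slot s = { .id = 1, .in_use = true };
        struct waiter w = { .name = "task-A", .slot = NULL };

        backlog_add(&w);
        free_slot(&s);
        printf("%s got slot %d (backlog left: %d)\n",
               w.name, w.slot ? w.slot->id : -1, backlog_len);
        return 0;
}
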
@@ -628,8 +628,9 @@ out_mapping_err:
         return false;
 }
 
-/* The tail iovec might not reside in the same page as the
- * head iovec.
+/* The tail iovec may include an XDR pad for the page list,
+ * as well as additional content, and may not reside in the
+ * same page as the head iovec.
  */
 static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
                                      struct xdr_buf *xdr,
@@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
                                    struct rpcrdma_req *req,
                                    struct xdr_buf *xdr)
 {
-        struct kvec *tail = &xdr->tail[0];
-
         if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
                 return false;
 
-        /* If there is a Read chunk, the page list is handled
+        /* If there is a Read chunk, the page list is being handled
          * via explicit RDMA, and thus is skipped here.
          */
 
-        if (tail->iov_len) {
-                if (!rpcrdma_prepare_tail_iov(req, xdr,
-                                              offset_in_page(tail->iov_base),
-                                              tail->iov_len))
+        /* Do not include the tail if it is only an XDR pad */
+        if (xdr->tail[0].iov_len > 3) {
+                unsigned int page_base, len;
+
+                /* If the content in the page list is an odd length,
+                 * xdr_write_pages() adds a pad at the beginning of
+                 * the tail iovec. Force the tail's non-pad content to
+                 * land at the next XDR position in the Send message.
+                 */
+                page_base = offset_in_page(xdr->tail[0].iov_base);
+                len = xdr->tail[0].iov_len;
+                page_base += len & 3;
+                len -= len & 3;
+                if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
                         return false;
                 kref_get(&req->rl_kref);
         }

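The restored comment above describes a small alignment step: when the page list content has an odd length, xdr_write_pages() puts an XDR pad at the front of the tail, and the Send path skips it by advancing the start and trimming the length by len & 3. A standalone sketch of just that arithmetic, with made-up lengths:

#include <stdio.h>

int main(void)
{
        /* Hypothetical tail iovec: a 3-byte XDR pad followed by
         * 12 bytes of real tail content. */
        unsigned int tail_offset_in_page = 100; /* offset_in_page(iov_base) */
        unsigned int tail_len = 3 + 12;

        unsigned int page_base = tail_offset_in_page;
        unsigned int len = tail_len;

        /* The two lines restored in the hunk above: */
        page_base += len & 3;
        len -= len & 3;

        printf("send tail from page offset %u, length %u\n", page_base, len);
        return 0;
}
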
@@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
         return;
 
 out_sleep:
-        set_bit(XPRT_CONGESTED, &xprt->state);
-        rpc_sleep_on(&xprt->backlog, task, NULL);
         task->tk_status = -EAGAIN;
+        xprt_add_backlog(xprt, task);
 }
 
 /**
@@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
         struct rpcrdma_xprt *r_xprt =
                 container_of(xprt, struct rpcrdma_xprt, rx_xprt);
 
-        memset(rqst, 0, sizeof(*rqst));
-        rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
-        if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
-                clear_bit(XPRT_CONGESTED, &xprt->state);
+        rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+        if (!xprt_wake_up_backlog(xprt, rqst)) {
+                memset(rqst, 0, sizeof(*rqst));
+                rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst));
+        }
 }
 
 static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,

@@ -1200,6 +1200,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
         return mr;
 }
 
+/**
+ * rpcrdma_reply_put - Put reply buffers back into pool
+ * @buffers: buffer pool
+ * @req: object to return
+ *
+ */
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
+{
+        if (req->rl_reply) {
+                rpcrdma_rep_put(buffers, req->rl_reply);
+                req->rl_reply = NULL;
+        }
+}
+
 /**
  * rpcrdma_buffer_get - Get a request buffer
  * @buffers: Buffer pool from which to obtain a buffer
@@ -1228,9 +1242,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
  */
 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
 {
-        if (req->rl_reply)
-                rpcrdma_rep_put(buffers, req->rl_reply);
-        req->rl_reply = NULL;
+        rpcrdma_reply_put(buffers, req);
 
         spin_lock(&buffers->rb_lock);
         list_add(&req->rl_list, &buffers->rb_send_bufs);

@@ -479,6 +479,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
                         struct rpcrdma_req *req);
 void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep);
+void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req);
 
 bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
                             gfp_t flags);

@@ -1010,6 +1010,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req)
                 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
                 return -ENOTCONN;
         }
+        if (!transport->inet)
+                return -ENOTCONN;
 
         xs_pktdump("packet data:",
                         req->rq_svec->iov_base,
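
For context: the added check returns -ENOTCONN when a concurrent disconnect has already torn down transport->inet, instead of dereferencing the dead socket further down the send path. A toy sketch of the same guard shape (invented struct, not the sunrpc one):

#include <errno.h>
#include <stdio.h>

/* Invented stand-in: 'inet' is NULL once the transport is disconnected. */
struct toy_transport {
        int *inet;
};

static int send_request(struct toy_transport *t)
{
        if (!t->inet)
                return -ENOTCONN;       /* bail out instead of oopsing */
        return 0;                       /* the real code would transmit here */
}

int main(void)
{
        struct toy_transport disconnected = { .inet = NULL };

        printf("send on disconnected transport -> %d\n", send_request(&disconnected));
        return 0;
}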