xprtrdma: Recycle MRs after disconnect
The optimization done in "xprtrdma: Simplify rpcrdma_mr_pop" was a bit too optimistic. MRs left over after a reconnect still need to be recycled, not added back to the free list, since they could be in flight or actually fully registered.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit ee2f412ece
parent f836b27eca
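To make the commit message concrete, here is a minimal user-space sketch (toy types only, not the xprtrdma structures) contrasting the two policies: a "reset" puts a leftover MR straight back on the free list where it can be handed out again, while a "recycle" tears it down and frees it, so an MR that may still be in flight or fully registered can never be reused.

/* Toy model of the two cleanup policies named in the commit message.
 * All names (toy_mr, free_list, toy_reset, toy_recycle) are
 * hypothetical; the real helpers are frwr_reset() and the
 * frwr_recycle()/frwr_mr_recycle() pair added below.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_mr {
	struct toy_mr *next;
	bool maybe_registered;	/* unknown state after a disconnect */
};

static struct toy_mr *free_list;

/* "Reset": put the MR straight back on the free list. Only safe when
 * the MR is known to be idle and unregistered.
 */
static void toy_reset(struct toy_mr *mr)
{
	mr->next = free_list;
	free_list = mr;
}

/* "Recycle": tear the MR down and free it; a replacement will be
 * allocated on demand later. Safe even if the MR was in flight or
 * still fully registered when the connection was lost.
 */
static void toy_recycle(struct toy_mr *mr)
{
	mr->maybe_registered = false;	/* stands in for deregister/unmap */
	free(mr);
}

int main(void)
{
	struct toy_mr *idle = calloc(1, sizeof(*idle));
	struct toy_mr *leftover = calloc(1, sizeof(*leftover));

	leftover->maybe_registered = true;

	toy_reset(idle);	/* a known-idle MR may be reused */
	toy_recycle(leftover);	/* a leftover MR after reconnect may not */

	printf("free list head: %p\n", (void *)free_list);
	free(free_list);
	return 0;
}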
@@ -88,15 +88,8 @@ void frwr_release_mr(struct rpcrdma_mr *mr)
 	kfree(mr);
 }
 
-/* MRs are dynamically allocated, so simply clean up and release the MR.
- * A replacement MR will subsequently be allocated on demand.
- */
-static void
-frwr_mr_recycle_worker(struct work_struct *work)
+static void frwr_mr_recycle(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 {
-	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
-	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
-
 	trace_xprtrdma_mr_recycle(mr);
 
 	if (mr->mr_dir != DMA_NONE) {
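This hunk turns the workqueue callback into a plain helper that takes its arguments explicitly instead of deriving them via container_of; the thin worker wrapper is re-added in the next hunk. A compact user-space sketch of that refactoring pattern (hypothetical names, container_of defined locally for illustration):

/* Sketch of the refactor in this hunk: the body that used to live in a
 * callback (which had to recover its object via container_of) becomes a
 * plain function with explicit parameters, so other paths can call it
 * directly. All names are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {			/* stands in for struct work_struct */
	void (*fn)(struct work_item *);
};

struct resource {
	int id;
	struct work_item recycle_work;	/* embedded, like mr->mr_recycle */
};

/* After the refactor: the real work, callable from anywhere. */
static void resource_recycle(struct resource *res)
{
	printf("recycling resource %d\n", res->id);
}

/* Thin callback wrapper: recover the enclosing object, then delegate. */
static void resource_recycle_worker(struct work_item *work)
{
	struct resource *res = container_of(work, struct resource,
					    recycle_work);

	resource_recycle(res);
}

int main(void)
{
	struct resource res = { .id = 7,
				.recycle_work.fn = resource_recycle_worker };

	/* Deferred path: the "workqueue" invokes the callback... */
	res.recycle_work.fn(&res.recycle_work);
	/* ...and a synchronous path can now call the helper directly. */
	resource_recycle(&res);
	return 0;
}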
@@ -114,6 +107,32 @@ frwr_mr_recycle_worker(struct work_struct *work)
 	frwr_release_mr(mr);
 }
 
+/* MRs are dynamically allocated, so simply clean up and release the MR.
+ * A replacement MR will subsequently be allocated on demand.
+ */
+static void
+frwr_mr_recycle_worker(struct work_struct *work)
+{
+	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr,
+					     mr_recycle);
+
+	frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
+/* frwr_recycle - Discard MRs
+ * @req: request to reset
+ *
+ * Used after a reconnect. These MRs could be in flight, we can't
+ * tell. Safe thing to do is release them.
+ */
+void frwr_recycle(struct rpcrdma_req *req)
+{
+	struct rpcrdma_mr *mr;
+
+	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
+		frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
 /* frwr_reset - Place MRs back on the free list
  * @req: request to reset
  *
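The new frwr_recycle() drains the request's list of registered MRs with a pop-until-NULL loop. A self-contained sketch of that idiom on a toy singly linked list (hypothetical types; rpcrdma_mr_pop() and req->rl_registered are the real counterparts):

/* Sketch of the drain loop used by frwr_recycle(): pop entries off a
 * list until it is empty, releasing each one. Toy types only.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_mr {
	struct toy_mr *next;
	int id;
};

/* Pop the head of the list, or return NULL when the list is empty. */
static struct toy_mr *toy_mr_pop(struct toy_mr **list)
{
	struct toy_mr *mr = *list;

	if (mr)
		*list = mr->next;
	return mr;
}

static void toy_mr_recycle(struct toy_mr *mr)
{
	printf("recycling MR %d\n", mr->id);
	free(mr);
}

int main(void)
{
	struct toy_mr *registered = NULL;

	/* Build a small list of "registered" MRs. */
	for (int i = 0; i < 3; i++) {
		struct toy_mr *mr = malloc(sizeof(*mr));

		mr->id = i;
		mr->next = registered;
		registered = mr;
	}

	/* The drain idiom: keep popping until the list is empty. */
	struct toy_mr *mr;
	while ((mr = toy_mr_pop(&registered)))
		toy_mr_recycle(mr);

	return 0;
}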
@@ -867,7 +867,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 	 * chunks. Very likely the connection has been replaced,
 	 * so these registrations are invalid and unusable.
 	 */
-	frwr_reset(req);
+	frwr_recycle(req);
 
 	/* This implementation supports the following combinations
 	 * of chunk lists in one RPC-over-RDMA Call message:
@@ -542,6 +542,7 @@ rpcrdma_data_dir(bool writing)
 /* Memory registration calls xprtrdma/frwr_ops.c
  */
 bool frwr_is_supported(struct ib_device *device);
+void frwr_recycle(struct rpcrdma_req *req);
 void frwr_reset(struct rpcrdma_req *req);
 int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
 int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);