svcrdma: Add an async version of svc_rdma_send_ctxt_put()
DMA unmapping can take quite some time, so it should not be handled in a single-threaded completion handler. Defer releasing send_ctxts to the recently-added workqueue.

With this patch, DMA unmapping can be handled in parallel, and it does not cause head-of-queue blocking of Send completions.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 9c7e1a0658
commit ae225fe27b
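As background for the diff below, here is a minimal sketch of the deferral pattern the patch applies: the object embeds a struct work_struct, the put path only queues a work item, and the work handler recovers the object with container_of() and performs the expensive release in workqueue context. All names here (example_ctxt, example_wq, example_ctxt_put, and so on) are hypothetical illustrations, not identifiers from this patch.

/* Hypothetical sketch of the deferred-release pattern; not code from this
 * patch. Assumes a dedicated workqueue (example_wq) is created elsewhere.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_ctxt {
	struct work_struct	work;	/* queued by example_ctxt_put() */
	/* ... resources that are slow to release (e.g. DMA mappings) ... */
};

static struct workqueue_struct *example_wq;	/* allocated elsewhere */

/* Runs in workqueue context, so releases can proceed in parallel. */
static void example_ctxt_release_async(struct work_struct *work)
{
	struct example_ctxt *ctxt =
		container_of(work, struct example_ctxt, work);

	/* ... slow teardown (DMA unmapping, page release) goes here ... */
	kfree(ctxt);	/* illustrative; a real ctxt might return to a free list */
}

/* Called from a completion handler; must stay fast, so it only queues work. */
static void example_ctxt_put(struct example_ctxt *ctxt)
{
	INIT_WORK(&ctxt->work, example_ctxt_release_async);
	queue_work(example_wq, &ctxt->work);
}

The patch follows the same shape: svc_rdma_send_ctxt_put() queues sc_work on svcrdma_wq, and svc_rdma_send_ctxt_put_async() hands the ctxt to svc_rdma_send_ctxt_release().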
@@ -152,7 +152,9 @@ struct svc_rdma_recv_ctxt {
 struct svc_rdma_send_ctxt {
 	struct llist_node	sc_node;
 	struct rpc_rdma_cid	sc_cid;
+	struct work_struct	sc_work;
 
+	struct svcxprt_rdma	*sc_rdma;
 	struct ib_send_wr	sc_send_wr;
 	struct ib_cqe		sc_cqe;
 	struct xdr_buf		sc_hdrbuf;
@@ -143,6 +143,7 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 
 	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
 
+	ctxt->sc_rdma = rdma;
 	ctxt->sc_send_wr.next = NULL;
 	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
 	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
@@ -223,15 +224,8 @@ out_empty:
 	goto out;
 }
 
-/**
- * svc_rdma_send_ctxt_put - Return send_ctxt to free list
- * @rdma: controlling svcxprt_rdma
- * @ctxt: object to return to the free list
- *
- * Pages left in sc_pages are DMA unmapped and released.
- */
-void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
-			    struct svc_rdma_send_ctxt *ctxt)
+static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
+				       struct svc_rdma_send_ctxt *ctxt)
 {
 	struct ib_device *device = rdma->sc_cm_id->device;
 	unsigned int i;
@@ -255,6 +249,28 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
 	llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
 }
 
+static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
+{
+	struct svc_rdma_send_ctxt *ctxt;
+
+	ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
+	svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);
+}
+
+/**
+ * svc_rdma_send_ctxt_put - Return send_ctxt to free list
+ * @rdma: controlling svcxprt_rdma
+ * @ctxt: object to return to the free list
+ *
+ * Pages left in sc_pages are DMA unmapped and released.
+ */
+void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
+			    struct svc_rdma_send_ctxt *ctxt)
+{
+	INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
+	queue_work(svcrdma_wq, &ctxt->sc_work);
+}
+
 /**
  * svc_rdma_wake_send_waiters - manage Send Queue accounting
  * @rdma: controlling transport