svcrdma: Remove unneeded spin locks from __svc_rdma_free

At the time __svc_rdma_free is called, we are guaranteed that all references
to this transport are gone. There is, therefore, no need to protect the
resource lists with a spin lock.
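
For context, a minimal sketch of the teardown path this relies on (assuming the xprt free callback defers the final cleanup to a work item via sc_work; the names mirror the driver but this is illustrative, not a quote of the source):

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * Runs only once the last reference to the transport has been
	 * dropped, so nothing else can queue onto sc_read_complete_q
	 * or sc_rq_dto_q after this point; __svc_rdma_free can walk
	 * those lists without taking the dto spin locks.
	 */
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}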

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Author: Tom Tucker <tom@opengridcomputing.com>
Date:   2008-05-28 13:20:24 -05:00
parent 87295b6c5c
commit 94dba4918d

@@ -1027,7 +1027,6 @@ static void __svc_rdma_free(struct work_struct *work)
 	 * cm_id because the device ptr is needed to unmap the dma in
 	 * svc_rdma_put_context.
 	 */
-	spin_lock_bh(&rdma->sc_read_complete_lock);
 	while (!list_empty(&rdma->sc_read_complete_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_entry(rdma->sc_read_complete_q.next,
@@ -1036,10 +1035,8 @@ static void __svc_rdma_free(struct work_struct *work)
 		list_del_init(&ctxt->dto_q);
 		svc_rdma_put_context(ctxt, 1);
 	}
-	spin_unlock_bh(&rdma->sc_read_complete_lock);
 	/* Destroy queued, but not processed recv completions */
-	spin_lock_bh(&rdma->sc_rq_dto_lock);
 	while (!list_empty(&rdma->sc_rq_dto_q)) {
 		struct svc_rdma_op_ctxt *ctxt;
 		ctxt = list_entry(rdma->sc_rq_dto_q.next,
@@ -1048,7 +1045,6 @@ static void __svc_rdma_free(struct work_struct *work)
 		list_del_init(&ctxt->dto_q);
 		svc_rdma_put_context(ctxt, 1);
 	}
-	spin_unlock_bh(&rdma->sc_rq_dto_lock);
 	/* Warn if we leaked a resource or under-referenced */
 	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);