iw_cxgb4: allocate wait object for each cq object

Remove the stack-allocated c4iw_wr_wait object and instead allocate the wait
object with each CQ, in preparation for correctly handling timeouts.
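
As a rough illustration of the lifetime problem this prepares for, here is a
userspace sketch (not driver code; wait_obj, fw_reply and wait_with_timeout
are made-up names): once a waiter is allowed to time out and return, a wait
object on that waiter's stack can still be written to by the late reply, so
the object has to live on the heap and be freed by its owner instead.  The
actual timeout handling is left to follow-on work; this sketch only shows why
the allocation moves off the stack.

/* Illustrative only: a late "reply" writes through the cookie pointer it
 * was handed, so the pointed-to object must outlive a timed-out waiter. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

struct wait_obj {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* Simulated firmware reply: completes long after the work was posted. */
static void *fw_reply(void *cookie)
{
	struct wait_obj *w = cookie;

	sleep(2);
	pthread_mutex_lock(&w->lock);
	w->done = 1;	/* would scribble on a dead stack frame if 'w' had
			 * been stack-allocated by a waiter that timed out */
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

/* Waiter that gives up after one second, like a command timeout. */
static int wait_with_timeout(struct wait_obj *w)
{
	struct timespec ts;
	int ret = 0, done;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 1;
	pthread_mutex_lock(&w->lock);
	while (!w->done && ret == 0)
		ret = pthread_cond_timedwait(&w->cond, &w->lock, &ts);
	done = w->done;
	pthread_mutex_unlock(&w->lock);
	return done ? 0 : -1;	/* -1: timed out, reply still pending */
}

int main(void)
{
	struct wait_obj *w = calloc(1, sizeof(*w));	/* heap, not stack */
	pthread_t t;

	if (!w)
		return 1;
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->cond, NULL);
	pthread_create(&t, NULL, fw_reply, w);

	printf("wait returned %d\n", wait_with_timeout(w));

	pthread_join(t, NULL);	/* reply has landed; now safe to free */
	free(w);
	return 0;
}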

Also clean up some of the error path unwind logic to make it more readable,
switching to descriptive goto labels.
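
For reference, a minimal sketch of that unwind style (again not driver code;
thing_create and the buf_* fields are invented), where each goto target names
the first cleanup step it performs instead of err1/err2/err3:

#include <stdlib.h>

struct thing {
	void *buf_a;
	void *buf_b;
};

static struct thing *thing_create(void)
{
	struct thing *t;

	t = calloc(1, sizeof(*t));
	if (!t)
		return NULL;

	t->buf_a = malloc(64);
	if (!t->buf_a)
		goto err_free_thing;

	t->buf_b = malloc(64);
	if (!t->buf_b)
		goto err_free_buf_a;

	return t;

err_free_buf_a:
	free(t->buf_a);
err_free_thing:
	free(t);
	return NULL;
}

int main(void)
{
	struct thing *t = thing_create();

	if (t) {
		free(t->buf_b);
		free(t->buf_a);
		free(t);
	}
	return 0;
}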

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Authored by Steve Wise on 2017-09-26 13:08:08 -07:00; committed by Doug Ledford
parent a3f12da0e9
commit 13ce83174a
2 changed files with 37 additions and 27 deletions

--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c

@@ -33,12 +33,12 @@
 #include "iw_cxgb4.h"
 
 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
+		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+		      struct c4iw_wr_wait *wr_waitp)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
-	struct c4iw_wr_wait wr_wait;
 	int ret;
 
 	wr_len = sizeof *res_wr + sizeof *res;
@@ -50,16 +50,16 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (uintptr_t)&wr_wait;
+	res_wr->cookie = (uintptr_t)wr_waitp;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_RESET;
 	res->u.cq.iqid = cpu_to_be32(cq->cqid);
 
-	c4iw_init_wr_wait(&wr_wait);
+	c4iw_init_wr_wait(wr_waitp);
 	ret = c4iw_ofld_send(rdev, skb);
 	if (!ret) {
-		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+		ret = c4iw_wait_for_reply(rdev, wr_waitp, 0, 0, __func__);
 	}
 
 	kfree(cq->sw_queue);
@@ -71,13 +71,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 }
 
 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		     struct c4iw_dev_ucontext *uctx)
+		     struct c4iw_dev_ucontext *uctx,
+		     struct c4iw_wr_wait *wr_waitp)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
 	int user = (uctx != &rdev->uctx);
-	struct c4iw_wr_wait wr_wait;
 	int ret;
 	struct sk_buff *skb;
@@ -119,7 +119,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (uintptr_t)&wr_wait;
+	res_wr->cookie = (uintptr_t)wr_waitp;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -139,13 +139,13 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	res->u.cq.iqsize = cpu_to_be16(cq->size);
 	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
 
-	c4iw_init_wr_wait(&wr_wait);
+	c4iw_init_wr_wait(wr_waitp);
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret)
 		goto err4;
-	pr_debug("wait_event wr_wait %p\n", &wr_wait);
-	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+	pr_debug("wait_event wr_wait %p\n", wr_waitp);
+	ret = c4iw_wait_for_reply(rdev, wr_waitp, 0, 0, __func__);
 	if (ret)
 		goto err4;
@@ -868,8 +868,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 				  : NULL;
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
 		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
-		   chp->destroy_skb);
-	chp->destroy_skb = NULL;
+		   chp->destroy_skb, chp->wr_waitp);
+	kfree(chp->wr_waitp);
 	kfree(chp);
 	return 0;
 }
@@ -901,12 +901,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
+	chp->wr_waitp = kzalloc(sizeof(*chp->wr_waitp), GFP_KERNEL);
+	if (!chp->wr_waitp) {
+		ret = -ENOMEM;
+		goto err_free_chp;
+	}
+	c4iw_init_wr_wait(chp->wr_waitp);
 
 	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
 	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!chp->destroy_skb) {
 		ret = -ENOMEM;
-		goto err1;
+		goto err_free_wr_wait;
 	}
 
 	if (ib_context)
@@ -947,9 +953,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	chp->cq.vector = vector;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
-			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+			chp->wr_waitp);
 	if (ret)
-		goto err2;
+		goto err_free_skb;
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
@@ -960,16 +967,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	init_waitqueue_head(&chp->wait);
 	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
 	if (ret)
-		goto err3;
+		goto err_destroy_cq;
 
 	if (ucontext) {
 		ret = -ENOMEM;
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
-			goto err4;
+			goto err_remove_handle;
 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
 		if (!mm2)
-			goto err5;
+			goto err_free_mm;
 
 		uresp.qid_mask = rhp->rdev.cqmask;
 		uresp.cqid = chp->cq.cqid;
@@ -984,7 +991,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		ret = ib_copy_to_udata(udata, &uresp,
 				       sizeof(uresp) - sizeof(uresp.reserved));
 		if (ret)
-			goto err6;
+			goto err_free_mm2;
 
 		mm->key = uresp.key;
 		mm->addr = virt_to_phys(chp->cq.queue);
@@ -1000,19 +1007,21 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		 chp->cq.cqid, chp, chp->cq.size,
 		 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
 	return &chp->ibcq;
-err6:
+err_free_mm2:
 	kfree(mm2);
-err5:
+err_free_mm:
 	kfree(mm);
-err4:
+err_remove_handle:
 	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err3:
+err_destroy_cq:
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
-		   chp->destroy_skb);
-err2:
+		   chp->destroy_skb, chp->wr_waitp);
+err_free_skb:
 	kfree_skb(chp->destroy_skb);
-err1:
+err_free_wr_wait:
+	kfree(chp->wr_waitp);
+err_free_chp:
 	kfree(chp);
 	return ERR_PTR(ret);
 }

--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -425,6 +425,7 @@ struct c4iw_cq {
 	spinlock_t comp_handler_lock;
 	atomic_t refcnt;
 	wait_queue_head_t wait;
+	struct c4iw_wr_wait *wr_waitp;
 };
 
 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)