bnxt_re: fix the regression due to changes in alloc_pbl
While adding the use of the for_each_sg_dma_page iterator to Broadcom's RDMA
driver, a regression was introduced in the __alloc_pbl path. The change
left bnxt_re dead on arrival (DOA) in the for-next branch.

Fix the regression to avoid a host crash when a user space object is
created: the previously unconditional access to hwq.pg_arr is now skipped
when the hwq is initialized for user space objects.

Fixes: 161ebe2498 ("RDMA/bnxt_re: Use for_each_sg_dma_page iterator on umem SGL")
Reported-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2612d723aa
commit c50866e285
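The failure mode is easy to reproduce outside the kernel. Below is a minimal
user-space sketch of the broken __alloc_pbl loop; the struct and names are
reduced stand-ins, not the driver's definitions. After the iterator
conversion, umem pages have no kernel virtual address, so pg_arr[i] is set to
NULL and the stale !pg_arr[i] check fails on the very first page.

	/* sketch_alloc_pbl.c - reduced model of the regression; stand-in for
	 * struct bnxt_qplib_pbl, not the driver's actual definition. */
	#include <stdio.h>

	struct pbl_sketch {
		unsigned long *pg_map_arr; /* DMA addresses of the pages */
		void **pg_arr;             /* kernel mappings; none exist for umem */
		int pg_count;
	};

	static int alloc_pbl_broken(struct pbl_sketch *pbl,
				    const unsigned long *dma, int pages)
	{
		int i;

		for (i = 0; i < pages; i++) { /* stands in for for_each_sg_dma_page() */
			pbl->pg_map_arr[i] = dma[i];
			pbl->pg_arr[i] = NULL; /* correct: umem has no kernel mapping */
			if (!pbl->pg_arr[i])   /* stale check: now always true... */
				return -1;     /* ...so every user allocation fails */
			pbl->pg_count++;
		}
		return 0;
	}

	int main(void)
	{
		unsigned long map[2];
		void *pages_arr[2];
		const unsigned long dma[2] = { 0x1000, 0x2000 };
		struct pbl_sketch pbl = { .pg_map_arr = map, .pg_arr = pages_arr };

		/* Fails on the first page: DOA for every user-space object. */
		printf("alloc_pbl_broken() = %d\n", alloc_pbl_broken(&pbl, dma, 2));
		return 0;
	}

The fix keeps pg_arr[i] = NULL as the intended state for user memory and
simply drops the stale check, as the final hunk below shows.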
drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -793,8 +793,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 {
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	struct bnxt_re_dev *rdev = qp->rdev;
-	int rc;
 	unsigned int flags;
+	int rc;
 
 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
@@ -803,9 +803,12 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 		return rc;
 	}
 
-	flags = bnxt_re_lock_cqs(qp);
-	bnxt_qplib_clean_qp(&qp->qplib_qp);
-	bnxt_re_unlock_cqs(qp, flags);
+	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
+		flags = bnxt_re_lock_cqs(qp);
+		bnxt_qplib_clean_qp(&qp->qplib_qp);
+		bnxt_re_unlock_cqs(qp, flags);
+	}
+
 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
 
 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
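The new rdma_is_kernel_res() guard leans on the restrack machinery: a QP
created from user space carries its owning context, while a kernel QP does
not, and only kernel QPs have driver-allocated (and kernel-mapped) queue
pages that bnxt_qplib_clean_qp() may safely walk. A reduced model of the
check follows; the boolean field is an illustrative stand-in for the real
rdma_restrack_entry bookkeeping in include/rdma/restrack.h.

	#include <stdbool.h>
	#include <stdio.h>

	/* Reduced stand-in for struct rdma_restrack_entry; the 'user' flag is
	 * illustrative, not the exact kernel layout. */
	struct restrack_sketch {
		bool user; /* set when the object was created from user space */
	};

	static bool is_kernel_res_sketch(const struct restrack_sketch *res)
	{
		return !res->user;
	}

	int main(void)
	{
		struct restrack_sketch kernel_qp = { .user = false };
		struct restrack_sketch user_qp = { .user = true };

		/* Mirrors the destroy path: the pg_arr-walking clean-up runs
		 * only when the queue memory was allocated by the kernel. */
		printf("clean kernel QP: %d\n", is_kernel_res_sketch(&kernel_qp)); /* 1 */
		printf("clean user QP:   %d\n", is_kernel_res_sketch(&user_qp));   /* 0 */
		return 0;
	}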
drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -862,18 +862,18 @@ exit:
 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
-	struct cmdq_create_qp req;
-	struct creq_create_qp_resp resp;
-	struct bnxt_qplib_pbl *pbl;
-	struct sq_psn_search **psn_search_ptr;
 	unsigned long int psn_search, poff = 0;
+	struct sq_psn_search **psn_search_ptr;
 	struct bnxt_qplib_q *sq = &qp->sq;
 	struct bnxt_qplib_q *rq = &qp->rq;
 	int i, rc, req_size, psn_sz = 0;
+	struct sq_send **hw_sq_send_ptr;
+	struct creq_create_qp_resp resp;
 	struct bnxt_qplib_hwq *xrrq;
 	u16 cmd_flags = 0, max_ssge;
-	u32 sw_prod, qp_flags = 0;
+	struct cmdq_create_qp req;
+	struct bnxt_qplib_pbl *pbl;
+	u32 qp_flags = 0;
 	u16 max_rsge;
 
 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -948,14 +948,6 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 			       CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
 			       CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
 
-	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
-	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
-	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
-		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
-						[get_sqe_idx(sw_prod)];
-		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
-	}
-
 	if (qp->scq)
 		req.scq_cid = cpu_to_le32(qp->scq->id);
 
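The deleted loop stamped SQ_BASE_WQE_TYPE_LOCAL_INVALID into every SQ entry
through the hwq page array. After the __alloc_pbl change in the final hunk
below, those page pointers are NULL for user-created QPs, so the
unconditional walk was the second crash site this commit removes. A reduced
sketch of the hazard; the geometry and names are illustrative only, not the
driver's.

	#include <stdio.h>

	#define SQE_PER_PG 4 /* hypothetical WQEs per page, for illustration */

	struct hwq_sketch {
		void **pg_arr;    /* per-page kernel mappings; NULL for user memory */
		int max_elements;
	};

	/* Stand-in for the removed init loop, with the missing NULL check added. */
	static int init_sq_wqes(struct hwq_sketch *hwq)
	{
		for (int i = 0; i < hwq->max_elements; i++) {
			unsigned char *pg = hwq->pg_arr[i / SQE_PER_PG];
			if (!pg)	/* user QP: the old code crashed right here */
				return -1;
			pg[i % SQE_PER_PG] = 0xf0; /* stand-in for LOCAL_INVALID */
		}
		return 0;
	}

	int main(void)
	{
		void *no_pages[2] = { NULL, NULL }; /* user-QP shape after the fix */
		struct hwq_sketch user_sq = { .pg_arr = no_pages, .max_elements = 8 };

		printf("init on user SQ: %d\n", init_sq_wqes(&user_sq)); /* -1, no crash */
		return 0;
	}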
drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -119,11 +119,8 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 		for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
 			pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
 			pbl->pg_arr[i] = NULL;
-			if (!pbl->pg_arr[i])
-				goto fail;
-
-			i++;
 			pbl->pg_count++;
+			i++;
 		}
 	}
 