RDMA/irdma: Propagate error codes

All functions now return Linux error codes. Propagate the return codes from
these functions as opposed to converting them to generic values.

Link: https://lore.kernel.org/r/20220217151851.1518-3-shiraz.saleem@intel.com
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
Shiraz Saleem 2022-02-17 09:18:50 -06:00 committed by Jason Gunthorpe
parent 2c4b14ea95
commit 45225a93cc
3 changed files with 13 additions and 17 deletions

File: drivers/infiniband/hw/irdma/hw.c (presumed from irdma_cfg_ceq_vector context)

@@ -1099,7 +1099,7 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
 	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
 	if (status) {
 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
-		return -EINVAL;
+		return status;
 	}
 	msix_vec->ceq_id = ceq_id;

File: drivers/infiniband/hw/irdma/main.c (presumed from irdma_lan_register_qset/irdma_probe context)

@@ -176,7 +176,7 @@ static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
 	ret = ice_add_rdma_qset(pf, &qset);
 	if (ret) {
 		ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
-		return -EINVAL;
+		return ret;
 	}

 	tc_node->l2_sched_node_id = qset.teid;
@@ -280,10 +280,9 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 	irdma_fill_device_info(iwdev, pf, vsi);
 	rf = iwdev->rf;

-	if (irdma_ctrl_init_hw(rf)) {
-		err = -EIO;
+	err = irdma_ctrl_init_hw(rf);
+	if (err)
 		goto err_ctrl_init;
-	}

 	l2params.mtu = iwdev->netdev->mtu;
 	ice_get_qos_params(pf, &qos_info);
@@ -291,10 +290,9 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 	if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
 		iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;

-	if (irdma_rt_init_hw(iwdev, &l2params)) {
-		err = -EIO;
+	err = irdma_rt_init_hw(iwdev, &l2params);
+	if (err)
 		goto err_rt_init;
-	}

 	err = irdma_ib_register_device(iwdev);
 	if (err)

File: drivers/infiniband/hw/irdma/verbs.c (presumed from irdma_setup_kmode_qp/irdma_hw_alloc_mw context)

@@ -603,7 +603,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 	status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
 				   &sqdepth);
 	if (status)
-		return -ENOMEM;
+		return status;

 	if (uk_attrs->hw_rev == IRDMA_GEN_1)
 		rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
@@ -614,7 +614,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 	status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
 				   &rqdepth);
 	if (status)
-		return -ENOMEM;
+		return status;

 	iwqp->kqp.sq_wrid_mem =
 		kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
@@ -688,7 +688,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
 	status = irdma_handle_cqp_op(rf, cqp_request);
 	irdma_put_cqp_request(&rf->cqp, cqp_request);

-	return status ? -ENOMEM : 0;
+	return status;
 }

 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
@@ -2316,7 +2316,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
 	status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
 				false);
 	if (status)
-		return -ENOMEM;
+		return status;

 	iwpbl->pbl_allocated = true;
 	level = palloc->level;
@@ -2457,7 +2457,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);

-	return status ? -ENOMEM : 0;
+	return status;
 }

 /**
@@ -3553,7 +3553,7 @@ error:
	ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
 		  __func__, ret);

-	return -EINVAL;
+	return ret;
 }

 /**
@@ -3873,10 +3873,8 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
 	cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
-	if (status)
-		return -ENOMEM;

-	return 0;
+	return status;
 }

 /**