RDMA/irdma: Check contents of user-space irdma_mem_reg_req object

The contents of the user-space req object are used for array indexing in
irdma_handle_q_mem() without being checked for valid values.

Guard against bad input in each of these req object page counts by limiting
them to the number of pages that make up the region.

Link: https://lore.kernel.org/r/20210625162329.1654-2-tatyana.e.nikolova@intel.com
Reported-by: coverity-bot <keescook+coverity-bot@chromium.org>
Addresses-Coverity-ID: 1505160 ("TAINTED_SCALAR")
Fixes: b48c24c2d7 ("RDMA/irdma: Implement device supported verb APIs")
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
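
To make the Coverity TAINTED_SCALAR finding and the fix concrete, here is a
minimal user-space sketch of the pattern the patch applies: the page counts
copied in from the irdma_mem_reg_req structure are caller-controlled, so they
must be capped by the number of pages actually pinned for the region (plus any
reserved shadow page) before they are used as indices into the page-address
array. The struct mem_reg_req and check_qp_req names below are illustrative
stand-ins rather than the kernel's symbols; only the arithmetic mirrors the
QP-type check added in irdma_reg_user_mr().

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the user-supplied registration request (untrusted input). */
struct mem_reg_req {
        uint16_t sq_pages;
        uint16_t rq_pages;
        uint16_t cq_pages;
};

/*
 * Reject a QP-type registration whose queue pages plus the reserved shadow
 * page would exceed the number of pages pinned for the memory region.
 */
static int check_qp_req(const struct mem_reg_req *req,
                        uint32_t page_cnt, uint32_t shadow_pgcnt)
{
        uint32_t total = (uint32_t)req->sq_pages + req->rq_pages + shadow_pgcnt;

        if (total > page_cnt)
                return -1;      /* the kernel returns -EINVAL here */
        return 0;
}

int main(void)
{
        struct mem_reg_req good = { .sq_pages = 4, .rq_pages = 4 };
        struct mem_reg_req bad  = { .sq_pages = 4096, .rq_pages = 4096 };

        /* Region pinned for 16 pages, one page reserved for the shadow area. */
        printf("good req: %s\n", check_qp_req(&good, 16, 1) ? "rejected" : "accepted");
        printf("bad req:  %s\n", check_qp_req(&bad, 16, 1) ? "rejected" : "accepted");
        return 0;
}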
@@ -2358,12 +2358,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
 	struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
 	struct irdma_hmc_pble *hmc_p;
 	u64 *arr = iwmr->pgaddrmem;
-	u32 pg_size;
+	u32 pg_size, total;
 	int err = 0;
-	int total;
 	bool ret = true;
 
-	total = req->sq_pages + req->rq_pages + req->cq_pages;
 	pg_size = iwmr->page_size;
 	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
 	if (err)
@@ -2380,6 +2378,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
 
 	switch (iwmr->type) {
 	case IRDMA_MEMREG_TYPE_QP:
+		total = req->sq_pages + req->rq_pages;
 		hmc_p = &qpmr->sq_pbl;
 		qpmr->shadow = (dma_addr_t)arr[total];
 
@@ -2406,7 +2405,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
 		hmc_p = &cqmr->cq_pbl;
 
 		if (!cqmr->split)
-			cqmr->shadow = (dma_addr_t)arr[total];
+			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
 
 		if (use_pbles)
 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
@@ -2747,7 +2746,8 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 stag = 0;
+	u32 total, stag = 0;
+	u8 shadow_pgcnt = 1;
 	bool use_pbles = false;
 	unsigned long flags;
 	int err = -EINVAL;
@@ -2801,7 +2801,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
-		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
+		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+		if (total > iwmr->page_cnt) {
+			err = -EINVAL;
+			goto error;
+		}
+		total = req.sq_pages + req.rq_pages;
+		use_pbles = (total > 2);
 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 		if (err)
 			goto error;
@@ -2814,6 +2820,14 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
+		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+			shadow_pgcnt = 0;
+		total = req.cq_pages + shadow_pgcnt;
+		if (total > iwmr->page_cnt) {
+			err = -EINVAL;
+			goto error;
+		}
+
 		use_pbles = (req.cq_pages > 1);
 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 		if (err)
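
A brief note on the accounting the new checks encode: the QP path always
charges one shadow page on top of sq_pages + rq_pages before comparing against
iwmr->page_cnt, while the CQ path charges that shadow page only when the device
does not advertise IRDMA_FEATURE_CQ_RESIZE (shadow_pgcnt is cleared to 0 when
the flag is set). Presumably resize-capable hardware keeps its shadow area
outside the user-supplied CQ buffer, so only cq_pages itself has to fit in the
pinned region. Once these checks pass, the user-supplied counts that
irdma_handle_q_mem() later uses as array indices are bounded by the size of the
registered region.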