
IB/uverbs: Handle large number of entries in poll CQ

In ib_uverbs_poll_cq() code there is a potential integer overflow if
userspace passes in a large cmd.ne.  The calls to kmalloc() would
allocate smaller buffers than intended, leading to memory corruption.
There is also an information leak if resp wasn't all used, since the
uninitialized tail of the buffer would still be copied to userspace.
Unprivileged userspace may call this function, although only if an
RDMA device that uses this function is present.
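To illustrate the overflow, here is a minimal user-space sketch (not kernel
code): it assumes a 32-bit size_t, uses a hypothetical 64-byte entry size in
place of sizeof(struct ib_wc), and picks an arbitrary hostile cmd.ne.  The
multiplication wraps, so kmalloc() would be asked for far less memory than
ib_poll_cq() is later allowed to fill:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical hostile cmd.ne and illustrative entry size */
            uint32_t ne = 0x08000001;
            uint32_t entry_size = 64;    /* stand-in for sizeof(struct ib_wc) */

            /* 32-bit size arithmetic, as on a 32-bit kernel: wraps mod 2^32 */
            uint32_t alloc_size = ne * entry_size;

            printf("bytes actually needed:      %llu\n",
                   (unsigned long long)ne * entry_size);      /* 8589934656 */
            printf("bytes requested after wrap: %u\n", alloc_size);   /* 64 */
            return 0;
    }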

Fix this by copying CQ entries one at a time, which avoids the
allocation entirely, and also by moving this copying into a function
that makes sure to initialize all memory copied to userspace.
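For the second point, here is a user-space analogue of what the new
copy_wc_to_user() helper guarantees (illustrative struct and function names,
and it uses memset() where the kernel code assigns every field explicitly):
every byte of a small on-stack temporary is written before it is copied out,
so no stale memory can reach the caller:

    #include <string.h>

    struct entry_out {
            unsigned long long wr_id;
            unsigned int       status;
            unsigned int       reserved;   /* would otherwise carry stale bytes */
    };

    static void copy_entry(struct entry_out *dst,
                           unsigned long long wr_id, unsigned int status)
    {
            struct entry_out tmp;

            memset(&tmp, 0, sizeof tmp);     /* nothing uninitialized escapes */
            tmp.wr_id  = wr_id;
            tmp.status = status;
            memcpy(dst, &tmp, sizeof tmp);   /* stands in for copy_to_user() */
    }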

Special thanks to Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
for his help and advice.

Cc: <stable@kernel.org>
Signed-off-by: Dan Carpenter <error27@gmail.com>

[ Monkey around with things a bit to avoid bad code generation by gcc
  when designated initializers are used.  - Roland ]

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Author:    Dan Carpenter
Date:      2010-10-13 09:13:12 +00:00
Committer: Roland Dreier
Parent:    59e57c622c
Commit:    7182afea8d

@@ -893,68 +893,81 @@ out:
 	return ret ? ret : in_len;
 }
 
+static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
+{
+	struct ib_uverbs_wc tmp;
+
+	tmp.wr_id          = wc->wr_id;
+	tmp.status         = wc->status;
+	tmp.opcode         = wc->opcode;
+	tmp.vendor_err     = wc->vendor_err;
+	tmp.byte_len       = wc->byte_len;
+	tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
+	tmp.qp_num         = wc->qp->qp_num;
+	tmp.src_qp         = wc->src_qp;
+	tmp.wc_flags       = wc->wc_flags;
+	tmp.pkey_index     = wc->pkey_index;
+	tmp.slid           = wc->slid;
+	tmp.sl             = wc->sl;
+	tmp.dlid_path_bits = wc->dlid_path_bits;
+	tmp.port_num       = wc->port_num;
+	tmp.reserved       = 0;
+
+	if (copy_to_user(dest, &tmp, sizeof tmp))
+		return -EFAULT;
+
+	return 0;
+}
+
 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
 			  const char __user *buf, int in_len,
 			  int out_len)
 {
 	struct ib_uverbs_poll_cq       cmd;
-	struct ib_uverbs_poll_cq_resp *resp;
+	struct ib_uverbs_poll_cq_resp  resp;
+	u8 __user                     *header_ptr;
+	u8 __user                     *data_ptr;
 	struct ib_cq                  *cq;
-	struct ib_wc                  *wc;
-	int                            ret = 0;
-	int                            i;
-	int                            rsize;
+	struct ib_wc                   wc;
+	int                            ret;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
-	wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
-	if (!wc)
-		return -ENOMEM;
-
-	rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
-	resp = kmalloc(rsize, GFP_KERNEL);
-	if (!resp) {
-		ret = -ENOMEM;
-		goto out_wc;
-	}
-
 	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
-	if (!cq) {
-		ret = -EINVAL;
-		goto out;
+	if (!cq)
+		return -EINVAL;
+
+	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
+	header_ptr = (void __user *)(unsigned long) cmd.response;
+	data_ptr = header_ptr + sizeof resp;
+
+	memset(&resp, 0, sizeof resp);
+	while (resp.count < cmd.ne) {
+		ret = ib_poll_cq(cq, 1, &wc);
+		if (ret < 0)
+			goto out_put;
+		if (!ret)
+			break;
+
+		ret = copy_wc_to_user(data_ptr, &wc);
+		if (ret)
+			goto out_put;
+
+		data_ptr += sizeof(struct ib_uverbs_wc);
+		++resp.count;
 	}
 
-	resp->count = ib_poll_cq(cq, cmd.ne, wc);
-
-	put_cq_read(cq);
-
-	for (i = 0; i < resp->count; i++) {
-		resp->wc[i].wr_id          = wc[i].wr_id;
-		resp->wc[i].status         = wc[i].status;
-		resp->wc[i].opcode         = wc[i].opcode;
-		resp->wc[i].vendor_err     = wc[i].vendor_err;
-		resp->wc[i].byte_len       = wc[i].byte_len;
-		resp->wc[i].ex.imm_data    = (__u32 __force) wc[i].ex.imm_data;
-		resp->wc[i].qp_num         = wc[i].qp->qp_num;
-		resp->wc[i].src_qp         = wc[i].src_qp;
-		resp->wc[i].wc_flags       = wc[i].wc_flags;
-		resp->wc[i].pkey_index     = wc[i].pkey_index;
-		resp->wc[i].slid           = wc[i].slid;
-		resp->wc[i].sl             = wc[i].sl;
-		resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
-		resp->wc[i].port_num       = wc[i].port_num;
-	}
-
-	if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
+	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
 		ret = -EFAULT;
+		goto out_put;
+	}
 
-out:
-	kfree(resp);
+	ret = in_len;
 
-out_wc:
-	kfree(wc);
-	return ret ? ret : in_len;
+out_put:
+	put_cq_read(cq);
+	return ret;
 }
 
 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,