linux/net/rds/ib_frmr.c

/*
* Copyright (c) 2016 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ib_mr.h"
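
/* Move ibmr from old_state to new_state, but only if it is still in
 * old_state. A transition out of FRMR_IS_INUSE additionally drops the
 * connection's i_fastreg_inuse_count and wakes anyone waiting for the
 * work-request ring to drain.
 */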
static inline void
rds_transition_frwr_state(struct rds_ib_mr *ibmr,
			  enum rds_ib_fr_state old_state,
			  enum rds_ib_fr_state new_state)
{
	if (cmpxchg(&ibmr->u.frmr.fr_state,
		    old_state, new_state) == old_state &&
	    old_state == FRMR_IS_INUSE) {
		/* enforce order of ibmr->u.frmr.fr_state update
		 * before decrementing i_fastreg_inuse_count
		 */
		smp_mb__before_atomic();
		atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
		if (waitqueue_active(&rds_ib_ring_empty_wait))
			wake_up(&rds_ib_ring_empty_wait);
	}
}
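
/* Allocate an MR for fastreg use: try to reuse one from the matching
 * pool (8K pool for small mappings, 1M pool otherwise) and fall back
 * to allocating a fresh IB_MR_TYPE_MEM_REG memory region.
 */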
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
					   int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	frmr = &ibmr->u.frmr;
	frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
			       pool->max_pages);
	if (IS_ERR(frmr->mr)) {
		pr_warn("RDS/IB: %s failed to allocate MR\n", __func__);
		err = PTR_ERR(frmr->mr);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	if (atomic_read(&pool->item_count) > pool->max_items_soft)
		pool->max_items_soft = pool->max_items;

	frmr->fr_state = FRMR_IS_FREE;
	init_waitqueue_head(&frmr->fr_inv_done);
	init_waitqueue_head(&frmr->fr_reg_done);
	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
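
/* Hand an MR back to its pool: MRs that must not be reused as-is go on
 * the drop_list, clean ones on the free_list. A flush is requested once
 * too many pages are pinned or too many MRs are dirty.
 */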
static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (drop)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
}
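
/* Map the MR's scatterlist into the device and post an IB_WR_REG_MR
 * work request. The spin on i_fastreg_wrs throttles posting so the
 * send queue cannot be overrun; the wait at the end makes registration
 * effectively synchronous.
 */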
static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct ib_reg_wr reg_wr;
	int ret, off = 0;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
				&off, PAGE_SIZE);
	if (unlikely(ret != ibmr->sg_len))
		return ret < 0 ? ret : -EINVAL;

	if (cmpxchg(&frmr->fr_state,
		    FRMR_IS_FREE, FRMR_IS_INUSE) != FRMR_IS_FREE)
		return -EBUSY;

	atomic_inc(&ibmr->ic->i_fastreg_inuse_count);

	/* Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR. The key used is a rolling 8-bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
	frmr->fr_reg = true;
	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
	if (unlikely(ret)) {
		/* Failure here can be because of -ENOMEM as well */
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);

		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		if (printk_ratelimit())
			pr_warn("RDS/IB: %s returned error(%d)\n",
				__func__, ret);
		goto out;
	}

	/* Wait for the registration to complete in order to prevent an invalid
	 * access error resulting from a race between the memory region already
	 * being accessed while registration is still pending.
	 */
	wait_event(frmr->fr_reg_done, !frmr->fr_reg);

out:
	return ret;
}
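
/* DMA-map the caller's scatterlist and register it with the device.
 * The mapped region must be contiguous in the middle: only the first
 * entry may start, and only the last entry may end, off a page
 * boundary.
 */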
static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
			   struct rds_ib_mr_pool *pool,
			   struct rds_ib_mr *ibmr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	int i;
	u32 len;
	int ret = 0;

	/* We want to tear down the old ibmr values here and fill it up
	 * with new sg values
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = sg;
	ibmr->sg_len = sg_len;
	ibmr->sg_dma_len = 0;
	frmr->sg_byte_len = 0;
	WARN_ON(ibmr->sg_dma_len);
	ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
					 DMA_BIDIRECTIONAL);
	if (unlikely(!ibmr->sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	frmr->sg_byte_len = 0;
	frmr->dma_npages = 0;
	len = 0;

	ret = -EINVAL;
	for (i = 0; i < ibmr->sg_dma_len; ++i) {
		unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
		u64 dma_addr = sg_dma_address(&ibmr->sg[i]);

		frmr->sg_byte_len += dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < ibmr->sg_dma_len - 1)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		len += dma_len;
	}
	frmr->dma_npages += len >> PAGE_SHIFT;

	if (frmr->dma_npages > ibmr->pool->max_pages) {
		ret = -EMSGSIZE;
		goto out_unmap;
	}

	ret = rds_ib_post_reg_frmr(ibmr);
	if (ret)
		goto out_unmap;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

	return ret;

out_unmap:
	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
			DMA_BIDIRECTIONAL);
	ibmr->sg_dma_len = 0;
	return ret;
}
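
/* Post an IB_WR_LOCAL_INV work request for an in-use MR and wait until
 * the invalidation has completed (or failed), at which point the rkey
 * is no longer usable by the remote side.
 */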
static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
	struct ib_send_wr *s_wr;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
	int ret = -EINVAL;

	if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
		goto out;

	if (frmr->fr_state != FRMR_IS_INUSE)
		goto out;

Revert "RDS: IB: split the mr registration and invalidation path" This reverts commit 56012459310a1dbcc55c2dbf5500a9f7571402cb. RDS kept spinning inside function "rds_ib_post_reg_frmr", waiting for "i_fastreg_wrs" to become incremented: while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { atomic_inc(&ibmr->ic->i_fastreg_wrs); cpu_relax(); } Looking at the original commit: commit 56012459310a ("RDS: IB: split the mr registration and invalidation path") In there, the "rds_ib_mr_cqe_handler" was changed in the following way: void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) if (frmr->fr_inv) { frmr->fr_state = FRMR_IS_FREE; frmr->fr_inv = false; atomic_inc(&ic->i_fastreg_wrs); } else { atomic_inc(&ic->i_fastunreg_wrs); } It looks like it's got it exactly backwards: Function "rds_ib_post_reg_frmr" keeps track of the outstanding requests via "i_fastreg_wrs". Function "rds_ib_post_inv" keeps track of the outstanding requests via "i_fastunreg_wrs" (post original commit). It also sets: frmr->fr_inv = true; However the completion handler "rds_ib_mr_cqe_handler" adjusts "i_fastreg_wrs" when "fr_inv" had been true, and adjusts "i_fastunreg_wrs" otherwise. The original commit was done in the name of performance: to remove the performance bottleneck No performance benefit could be observed with a fixed-up version of the original commit measured between two Oracle X7 servers, both equipped with Mellanox Connect-X5 HCAs. The prudent course of action is to revert this commit. Signed-off-by: Gerd Rausch <gerd.rausch@oracle.com> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
2019-06-29 08:31:19 +08:00
	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	frmr->fr_inv = true;
	s_wr = &frmr->fr_wr;
	memset(s_wr, 0, sizeof(*s_wr));
	s_wr->wr_id = (unsigned long)(void *)ibmr;
	s_wr->opcode = IB_WR_LOCAL_INV;
	s_wr->ex.invalidate_rkey = frmr->mr->rkey;
	s_wr->send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
	if (unlikely(ret)) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		frmr->fr_inv = false;
		/* enforce order of frmr->fr_inv update
		 * before incrementing i_fastreg_wrs
		 */
		smp_mb__before_atomic();
Revert "RDS: IB: split the mr registration and invalidation path" This reverts commit 56012459310a1dbcc55c2dbf5500a9f7571402cb. RDS kept spinning inside function "rds_ib_post_reg_frmr", waiting for "i_fastreg_wrs" to become incremented: while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { atomic_inc(&ibmr->ic->i_fastreg_wrs); cpu_relax(); } Looking at the original commit: commit 56012459310a ("RDS: IB: split the mr registration and invalidation path") In there, the "rds_ib_mr_cqe_handler" was changed in the following way: void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) if (frmr->fr_inv) { frmr->fr_state = FRMR_IS_FREE; frmr->fr_inv = false; atomic_inc(&ic->i_fastreg_wrs); } else { atomic_inc(&ic->i_fastunreg_wrs); } It looks like it's got it exactly backwards: Function "rds_ib_post_reg_frmr" keeps track of the outstanding requests via "i_fastreg_wrs". Function "rds_ib_post_inv" keeps track of the outstanding requests via "i_fastunreg_wrs" (post original commit). It also sets: frmr->fr_inv = true; However the completion handler "rds_ib_mr_cqe_handler" adjusts "i_fastreg_wrs" when "fr_inv" had been true, and adjusts "i_fastunreg_wrs" otherwise. The original commit was done in the name of performance: to remove the performance bottleneck No performance benefit could be observed with a fixed-up version of the original commit measured between two Oracle X7 servers, both equipped with Mellanox Connect-X5 HCAs. The prudent course of action is to revert this commit. Signed-off-by: Gerd Rausch <gerd.rausch@oracle.com> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
2019-06-29 08:31:19 +08:00
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
		goto out;
	}

	/* Wait for the FRMR_IS_FREE (or FRMR_IS_STALE) transition in order to
	 * 1) avoid a silly bouncing between "clean_list" and "drop_list"
	 *    triggered by function "rds_ib_reg_frmr" as it releases frmr
	 *    regions whose state is not "FRMR_IS_FREE" right away.
	 * 2) prevent an invalid access error in a race
	 *    from a pending "IB_WR_LOCAL_INV" operation
	 *    with a teardown ("dma_unmap_sg", "put_page")
	 *    and de-registration ("ib_dereg_mr") of the corresponding
	 *    memory region.
	 */
	wait_event(frmr->fr_inv_done, frmr->fr_state != FRMR_IS_INUSE);

out:
	return ret;
}
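
/* Completion handler shared by registration and invalidation WRs: mark
 * the MR stale on error, complete the pending fr_inv/fr_reg transition,
 * wake the waiter and release the WR slot taken from i_fastreg_wrs.
 */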
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (wc->status != IB_WC_SUCCESS) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
		if (rds_conn_up(ic->conn))
			rds_ib_conn_error(ic->conn,
					  "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
					  &ic->conn->c_laddr,
					  &ic->conn->c_faddr,
					  wc->status,
					  ib_wc_status_msg(wc->status),
					  wc->vendor_err);
	}

	if (frmr->fr_inv) {
		rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
		frmr->fr_inv = false;
		wake_up(&frmr->fr_inv_done);
	}

Revert "RDS: IB: split the mr registration and invalidation path" This reverts commit 56012459310a1dbcc55c2dbf5500a9f7571402cb. RDS kept spinning inside function "rds_ib_post_reg_frmr", waiting for "i_fastreg_wrs" to become incremented: while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { atomic_inc(&ibmr->ic->i_fastreg_wrs); cpu_relax(); } Looking at the original commit: commit 56012459310a ("RDS: IB: split the mr registration and invalidation path") In there, the "rds_ib_mr_cqe_handler" was changed in the following way: void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) if (frmr->fr_inv) { frmr->fr_state = FRMR_IS_FREE; frmr->fr_inv = false; atomic_inc(&ic->i_fastreg_wrs); } else { atomic_inc(&ic->i_fastunreg_wrs); } It looks like it's got it exactly backwards: Function "rds_ib_post_reg_frmr" keeps track of the outstanding requests via "i_fastreg_wrs". Function "rds_ib_post_inv" keeps track of the outstanding requests via "i_fastunreg_wrs" (post original commit). It also sets: frmr->fr_inv = true; However the completion handler "rds_ib_mr_cqe_handler" adjusts "i_fastreg_wrs" when "fr_inv" had been true, and adjusts "i_fastunreg_wrs" otherwise. The original commit was done in the name of performance: to remove the performance bottleneck No performance benefit could be observed with a fixed-up version of the original commit measured between two Oracle X7 servers, both equipped with Mellanox Connect-X5 HCAs. The prudent course of action is to revert this commit. Signed-off-by: Gerd Rausch <gerd.rausch@oracle.com> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
2019-06-29 08:31:19 +08:00
	if (frmr->fr_reg) {
		frmr->fr_reg = false;
		wake_up(&frmr->fr_reg_done);
	}

	/* enforce order of frmr->{fr_reg,fr_inv} update
	 * before incrementing i_fastreg_wrs
	 */
	smp_mb__before_atomic();
Revert "RDS: IB: split the mr registration and invalidation path" This reverts commit 56012459310a1dbcc55c2dbf5500a9f7571402cb. RDS kept spinning inside function "rds_ib_post_reg_frmr", waiting for "i_fastreg_wrs" to become incremented: while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { atomic_inc(&ibmr->ic->i_fastreg_wrs); cpu_relax(); } Looking at the original commit: commit 56012459310a ("RDS: IB: split the mr registration and invalidation path") In there, the "rds_ib_mr_cqe_handler" was changed in the following way: void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) if (frmr->fr_inv) { frmr->fr_state = FRMR_IS_FREE; frmr->fr_inv = false; atomic_inc(&ic->i_fastreg_wrs); } else { atomic_inc(&ic->i_fastunreg_wrs); } It looks like it's got it exactly backwards: Function "rds_ib_post_reg_frmr" keeps track of the outstanding requests via "i_fastreg_wrs". Function "rds_ib_post_inv" keeps track of the outstanding requests via "i_fastunreg_wrs" (post original commit). It also sets: frmr->fr_inv = true; However the completion handler "rds_ib_mr_cqe_handler" adjusts "i_fastreg_wrs" when "fr_inv" had been true, and adjusts "i_fastunreg_wrs" otherwise. The original commit was done in the name of performance: to remove the performance bottleneck No performance benefit could be observed with a fixed-up version of the original commit measured between two Oracle X7 servers, both equipped with Mellanox Connect-X5 HCAs. The prudent course of action is to revert this commit. Signed-off-by: Gerd Rausch <gerd.rausch@oracle.com> Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
2019-06-29 08:31:19 +08:00
	atomic_inc(&ic->i_fastreg_wrs);
}
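
/* Invalidate and tear down the MRs on "list", destroying up to "goal"
 * of them. MRs that are still in use stay on the list for the caller;
 * stale MRs are always destroyed.
 */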
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
		       unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_frmr *frmr;
	int ret = 0, ret2;
	unsigned int freed = *nfreed;

	/* Post a LOCAL_INV work request for every MR that still holds a
	 * DMA mapping
	 */
	list_for_each_entry(ibmr, list, unmap_list) {
		if (ibmr->sg_dma_len) {
			ret2 = rds_ib_post_inv(ibmr);
			if (ret2 && !ret)
				ret = ret2;
		}
	}

	if (ret)
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		*unpinned += ibmr->sg_len;
		frmr = &ibmr->u.frmr;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
			/* Don't de-allocate if the MR is not free yet */
			if (frmr->fr_state == FRMR_IS_INUSE)
				continue;

			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);

			list_del(&ibmr->unmap_list);
			if (frmr->mr)
				ib_dereg_mr(frmr->mr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}
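
/* Register "sg" for RDMA and return the MR backing it. MRs that come
 * back from the pool in a state other than FRMR_IS_FREE are dropped
 * and a new one is allocated; on success, *key holds the rkey to
 * advertise to the peer.
 */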
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
				  struct rds_ib_connection *ic,
				  struct scatterlist *sg,
				  unsigned long nents, u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int ret;

	if (!ic) {
		/* TODO: Add FRWR support for RDS_GET_MR using proxy qp */
		return ERR_PTR(-EOPNOTSUPP);
	}

	do {
		if (ibmr)
			rds_ib_free_frmr(ibmr, true);
		ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
		if (IS_ERR(ibmr))
			return ibmr;
		frmr = &ibmr->u.frmr;
	} while (frmr->fr_state != FRMR_IS_FREE);

	ibmr->ic = ic;
	ibmr->device = rds_ibdev;
	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
	if (ret == 0) {
		*key = frmr->mr->rkey;
	} else {
		rds_ib_free_frmr(ibmr, false);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}
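
/* Queue an MR for the pool's flush worker: stale MRs go on the
 * drop_list, everything else on the free_list.
 */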
void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (frmr->fr_state == FRMR_IS_STALE)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}