RDMA/hns: Move SRQ code to the reasonable place
Just move the SRQ related code to a more reasonable place, and unify the format of some prints.

Link: https://lore.kernel.org/r/1588071823-40200-5-git-send-email-liweihang@huawei.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent 54d6638765
commit ffb1308b88
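The "unify format of some prints" part can be seen in the hunks below. A minimal before/after sketch of one of the affected messages, with the strings taken from this diff (fragment only, not a standalone example; hr_dev and ret come from the surrounding functions):

        /* old wording, removed together with the old location */
        ibdev_err(&hr_dev->ib_dev,
                  "failed to process cmd when modifying SRQ, ret = %d\n", ret);

        /* new wording used at the new location */
        ibdev_err(&hr_dev->ib_dev,
                  "failed to handle cmd of modifying SRQ, ret = %d.\n", ret);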
@@ -694,6 +694,129 @@ out:
        return ret;
}

static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
{
        return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
{
        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
        srq->tail++;

        spin_unlock(&srq->lock);
}

static int find_empty_entry(struct hns_roce_idx_que *idx_que,
                            unsigned long size)
{
        int wqe_idx;

        if (unlikely(bitmap_full(idx_que->bitmap, size)))
                return -ENOSPC;

        wqe_idx = find_first_zero_bit(idx_que->bitmap, size);

        bitmap_set(idx_que->bitmap, wqe_idx, 1);

        return wqe_idx;
}

static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
                           int cur_idx, int wqe_idx)
{
        unsigned int *addr;

        addr = (unsigned int *)hns_roce_buf_offset(idx_que->mtr.kmem,
                                                   cur_idx * idx_que->entry_sz);
        *addr = wqe_idx;
}

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                                     const struct ib_recv_wr *wr,
                                     const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_v2_db srq_db;
        unsigned long flags;
        int ret = 0;
        int wqe_idx;
        void *wqe;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        ind = srq->head & (srq->wqe_cnt - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge >= srq->max_gs)) {
                        ret = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
                if (wqe_idx < 0) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                fill_idx_queue(&srq->idx_que, ind, wqe_idx);
                wqe = get_srq_wqe(srq, wqe_idx);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

                for (i = 0; i < wr->num_sge; ++i) {
                        dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
                        dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
                        dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
                }

                if (i < srq->max_gs) {
                        dseg[i].len = 0;
                        dseg[i].lkey = cpu_to_le32(0x100);
                        dseg[i].addr = 0;
                }

                srq->wrid[wqe_idx] = wr->wr_id;
                ind = (ind + 1) & (srq->wqe_cnt - 1);
        }

        if (likely(nreq)) {
                srq->head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                srq_db.byte_4 =
                        cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
                                    (srq->srqn & V2_DB_BYTE_4_TAG_M));
                srq_db.parameter = cpu_to_le32(srq->head);

                hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return ret;
}

static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
                                      unsigned long instance_stage,
                                      unsigned long reset_stage)
@@ -2667,22 +2790,6 @@ static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
        return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
}

static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
{
        return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
{
        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
        srq->tail++;

        spin_unlock(&srq->lock);
}

static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
        *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
@@ -4777,6 +4884,186 @@ out:
        return ret;
}

static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
                                   u32 cqn, void *mb_buf, u64 *mtts_wqe,
                                   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
                                   dma_addr_t dma_handle_idx)
{
        struct hns_roce_srq_context *srq_context;

        srq_context = mb_buf;
        memset(srq_context, 0, sizeof(*srq_context));

        roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
                       SRQC_BYTE_4_SRQ_ST_S, 1);

        roce_set_field(srq_context->byte_4_srqn_srqst,
                       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
                       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
                       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
                        hr_dev->caps.srqwqe_hop_num));
        roce_set_field(srq_context->byte_4_srqn_srqst,
                       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
                       ilog2(srq->wqe_cnt));

        roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
                       SRQC_BYTE_4_SRQN_S, srq->srqn);

        roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

        roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
                       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);

        srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));

        roce_set_field(srq_context->byte_24_wqe_bt_ba,
                       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
                       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
                       dma_handle_wqe >> 35);

        roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
                       SRQC_BYTE_28_PD_S, pdn);
        roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
                       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
                       fls(srq->max_gs - 1));

        srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
        roce_set_field(srq_context->rsv_idx_bt_ba,
                       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
                       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
                       dma_handle_idx >> 35);

        srq_context->idx_cur_blk_addr =
                cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
        roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
                       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
                       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
                       upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
        roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
                       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
                       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
                       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
                       hr_dev->caps.idx_hop_num);

        roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
                       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
                       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
                       to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
        roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
                       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
                       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
                       to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));

        srq_context->idx_nxt_blk_addr =
                cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
        roce_set_field(srq_context->rsv_idxnxtblkaddr,
                       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
                       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
                       upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
        roce_set_field(srq_context->byte_56_xrc_cqn,
                       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
                       cqn);
        roce_set_field(srq_context->byte_56_xrc_cqn,
                       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
                       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
                       to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
        roce_set_field(srq_context->byte_56_xrc_cqn,
                       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
                       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
                       to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));

        roce_set_bit(srq_context->db_record_addr_record_en,
                     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}

static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
                                  struct ib_srq_attr *srq_attr,
                                  enum ib_srq_attr_mask srq_attr_mask,
                                  struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_srq_context *srq_context;
        struct hns_roce_srq_context *srqc_mask;
        struct hns_roce_cmd_mailbox *mailbox;
        int ret;

        if (srq_attr_mask & IB_SRQ_LIMIT) {
                if (srq_attr->srq_limit >= srq->wqe_cnt)
                        return -EINVAL;

                mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
                if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);

                srq_context = mailbox->buf;
                srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;

                memset(srqc_mask, 0xff, sizeof(*srqc_mask));

                roce_set_field(srq_context->byte_8_limit_wl,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
                roce_set_field(srqc_mask->byte_8_limit_wl,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

                ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
                                        HNS_ROCE_CMD_MODIFY_SRQC,
                                        HNS_ROCE_CMD_TIMEOUT_MSECS);
                hns_roce_free_cmd_mailbox(hr_dev, mailbox);
                if (ret) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "failed to handle cmd of modifying SRQ, ret = %d.\n",
                                  ret);
                        return ret;
                }
        }

        return 0;
}

static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_srq_context *srq_context;
        struct hns_roce_cmd_mailbox *mailbox;
        int limit_wl;
        int ret;

        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        srq_context = mailbox->buf;
        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
                                HNS_ROCE_CMD_QUERY_SRQC,
                                HNS_ROCE_CMD_TIMEOUT_MSECS);
        if (ret) {
                ibdev_err(&hr_dev->ib_dev,
                          "failed to process cmd of querying SRQ, ret = %d.\n",
                          ret);
                goto out;
        }

        limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
                                  SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                                  SRQC_BYTE_8_SRQ_LIMIT_WL_S);

        attr->srq_limit = limit_wl;
        attr->max_wr = srq->wqe_cnt;
        attr->max_sge = srq->max_gs;

        memcpy(srq_context, mailbox->buf, sizeof(*srq_context));

out:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        return ret;
}

static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
@@ -5588,296 +5875,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
        destroy_workqueue(hr_dev->irq_workq);
}

static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
                                   u32 cqn, void *mb_buf, u64 *mtts_wqe,
                                   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
                                   dma_addr_t dma_handle_idx)
{
        struct hns_roce_srq_context *srq_context;

        srq_context = mb_buf;
        memset(srq_context, 0, sizeof(*srq_context));

        roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
                       SRQC_BYTE_4_SRQ_ST_S, 1);

        roce_set_field(srq_context->byte_4_srqn_srqst,
                       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
                       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
                       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
                        hr_dev->caps.srqwqe_hop_num));
        roce_set_field(srq_context->byte_4_srqn_srqst,
                       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
                       ilog2(srq->wqe_cnt));

        roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
                       SRQC_BYTE_4_SRQN_S, srq->srqn);

        roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

        roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
                       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);

        srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));

        roce_set_field(srq_context->byte_24_wqe_bt_ba,
                       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
                       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
                       dma_handle_wqe >> 35);

        roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
                       SRQC_BYTE_28_PD_S, pdn);
        roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
                       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
                       fls(srq->max_gs - 1));

        srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
        roce_set_field(srq_context->rsv_idx_bt_ba,
                       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
                       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
                       dma_handle_idx >> 35);

        srq_context->idx_cur_blk_addr =
                cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
        roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
                       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
                       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
                       upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
        roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
                       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
                       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
                       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
                       hr_dev->caps.idx_hop_num);

        roce_set_field(
                srq_context->byte_44_idxbufpgsz_addr,
                SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
                SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
                to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
        roce_set_field(
                srq_context->byte_44_idxbufpgsz_addr,
                SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
                SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
                to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));

        srq_context->idx_nxt_blk_addr =
                cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
        roce_set_field(srq_context->rsv_idxnxtblkaddr,
                       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
                       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
                       upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
        roce_set_field(srq_context->byte_56_xrc_cqn,
                       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
                       cqn);
        roce_set_field(srq_context->byte_56_xrc_cqn,
                       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
                       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
                       to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
        roce_set_field(srq_context->byte_56_xrc_cqn,
                       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
                       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
                       to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));

        roce_set_bit(srq_context->db_record_addr_record_en,
                     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}

static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
                                  struct ib_srq_attr *srq_attr,
                                  enum ib_srq_attr_mask srq_attr_mask,
                                  struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_srq_context *srq_context;
        struct hns_roce_srq_context *srqc_mask;
        struct hns_roce_cmd_mailbox *mailbox;
        int ret;

        if (srq_attr_mask & IB_SRQ_LIMIT) {
                if (srq_attr->srq_limit >= srq->wqe_cnt)
                        return -EINVAL;

                mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
                if (IS_ERR(mailbox))
                        return PTR_ERR(mailbox);

                srq_context = mailbox->buf;
                srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;

                memset(srqc_mask, 0xff, sizeof(*srqc_mask));

                roce_set_field(srq_context->byte_8_limit_wl,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
                roce_set_field(srqc_mask->byte_8_limit_wl,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                               SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

                ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
                                        HNS_ROCE_CMD_MODIFY_SRQC,
                                        HNS_ROCE_CMD_TIMEOUT_MSECS);
                hns_roce_free_cmd_mailbox(hr_dev, mailbox);
                if (ret) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "failed to process cmd when modifying SRQ, ret = %d\n",
                                  ret);
                        return ret;
                }
        }

        return 0;
}

static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_srq_context *srq_context;
        struct hns_roce_cmd_mailbox *mailbox;
        int limit_wl;
        int ret;

        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        srq_context = mailbox->buf;
        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
                                HNS_ROCE_CMD_QUERY_SRQC,
                                HNS_ROCE_CMD_TIMEOUT_MSECS);
        if (ret) {
                ibdev_err(&hr_dev->ib_dev,
                          "failed to process cmd when querying SRQ, ret = %d\n",
                          ret);
                goto out;
        }

        limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
                                  SRQC_BYTE_8_SRQ_LIMIT_WL_M,
                                  SRQC_BYTE_8_SRQ_LIMIT_WL_S);

        attr->srq_limit = limit_wl;
        attr->max_wr = srq->wqe_cnt - 1;
        attr->max_sge = srq->max_gs;

        memcpy(srq_context, mailbox->buf, sizeof(*srq_context));

out:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        return ret;
}

static int find_empty_entry(struct hns_roce_idx_que *idx_que,
                            unsigned long size)
{
        int wqe_idx;

        if (unlikely(bitmap_full(idx_que->bitmap, size)))
                return -ENOSPC;

        wqe_idx = find_first_zero_bit(idx_que->bitmap, size);

        bitmap_set(idx_que->bitmap, wqe_idx, 1);

        return wqe_idx;
}

static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
                           int cur_idx, int wqe_idx)
{
        unsigned int *addr;

        addr = (unsigned int *)hns_roce_buf_offset(idx_que->mtr.kmem,
                                                   cur_idx * idx_que->entry_sz);
        *addr = wqe_idx;
}

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                                     const struct ib_recv_wr *wr,
                                     const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_v2_db srq_db;
        unsigned long flags;
        int ret = 0;
        int wqe_idx;
        void *wqe;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        ind = srq->head & (srq->wqe_cnt - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->max_gs)) {
                        ret = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely(srq->head == srq->tail)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
                if (wqe_idx < 0) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                fill_idx_queue(&srq->idx_que, ind, wqe_idx);
                wqe = get_srq_wqe(srq, wqe_idx);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

                for (i = 0; i < wr->num_sge; ++i) {
                        dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
                        dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
                        dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
                }

                if (i < srq->max_gs) {
                        dseg[i].len = 0;
                        dseg[i].lkey = cpu_to_le32(0x100);
                        dseg[i].addr = 0;
                }

                srq->wrid[wqe_idx] = wr->wr_id;
                ind = (ind + 1) & (srq->wqe_cnt - 1);
        }

        if (likely(nreq)) {
                srq->head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                srq_db.byte_4 =
                        cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
                                    (srq->srqn & V2_DB_BYTE_4_TAG_M));
                srq_db.parameter = cpu_to_le32(srq->head);

                hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);

        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return ret;
}

static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
        .query_cqc_info = hns_roce_v2_query_cqc_info,
};