IB/mlx5: Add ODP WQE handlers for kernel QPs
One of the steps in the ODP page fault handler for WQEs is to read a
WQE from a QP send queue or receive queue buffer at a specific index.
Since the implementation of this buffer differs between kernel and user
QPs, the handler needs to be aware of that and treat each case
accordingly. ODP for kernel MRs is currently supported only for
RDMA_READ and RDMA_WRITE operations, so change the handler to:

- read a WQE from a kernel QP send queue
- fail if access to a receive queue or shared receive queue is
  required for a kernel QP

Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
commit da9ee9d8a8
parent 87d8069f6b
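In outline, the diff below turns the exported WQE readers into thin dispatchers that choose a kernel-buffer or user-buffer path by checking whether the QP has a umem. The following is a minimal, self-contained sketch of that dispatch pattern only, not the driver source: struct qp, struct wqe_ctrl_seg, and the helper names are hypothetical stand-ins for the mlx5 types and functions that appear in the real diff.

    #include <errno.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for mlx5_wqe_ctrl_seg / mlx5_ib_qp. */
    struct wqe_ctrl_seg { unsigned int qpn_ds; };
    struct qp { void *umem; /* non-NULL only for user QPs */ };

    /* Stubs standing in for the buffer-specific readers. */
    static int read_user_sq(struct qp *qp, int idx, void *buf, size_t len, size_t *bc)
    { *bc = 0; return 0; /* would copy from pinned user pages via the umem */ }
    static int read_kernel_sq(struct qp *qp, int idx, void *buf, size_t len, size_t *bc)
    { *bc = 0; return 0; /* would walk the fragmented kernel SQ buffer */ }
    static int read_user_rq(struct qp *qp, int idx, void *buf, size_t len, size_t *bc)
    { *bc = 0; return 0; }

    /* SQ read: both QP flavours are supported. */
    static int read_wqe_sq(struct qp *qp, int idx, void *buf, size_t len, size_t *bc)
    {
            if (len < sizeof(struct wqe_ctrl_seg))
                    return -EINVAL;         /* need at least the control segment */
            if (!qp->umem)                  /* no umem => kernel QP */
                    return read_kernel_sq(qp, idx, buf, len, bc);
            return read_user_sq(qp, idx, buf, len, bc);
    }

    /* RQ (and, analogously, SRQ) read: kernel QPs must fail, since ODP
     * for kernel MRs covers only RDMA_READ and RDMA_WRITE. */
    static int read_wqe_rq(struct qp *qp, int idx, void *buf, size_t len, size_t *bc)
    {
            if (!qp->umem)
                    return -EOPNOTSUPP;
            return read_user_rq(qp, idx, buf, len, bc);
    }

The same !umem test distinguishes the two flavours throughout the patch: a kernel QP's work queues live in a driver-allocated fragmented buffer (qp->sq.fbc) rather than in pinned user pages, which is why the new mlx5_ib_read_kernel_wqe_sq() in the diff walks the queue stride by stride with mlx5_frag_buf_get_wqe() instead of copying through an ib_umem.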
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1153,12 +1153,12 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		      const struct ib_send_wr **bad_wr);
 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 		      const struct ib_recv_wr **bad_wr);
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
-			     int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
-			     int buflen, size_t *bc);
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
-			      void *buffer, int buflen, size_t *bc);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+			size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+			size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+			 size_t buflen, size_t *bc);
 int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		      struct ib_udata *udata);
 void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1237,15 +1237,15 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 	wqe = wqe_start;
 	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
 	if (qp && sq) {
-		ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
-					       &bytes_copied);
+		ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+					  &bytes_copied);
 		if (ret)
 			goto read_user;
 		ret = mlx5_ib_mr_initiator_pfault_handler(
 			dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
 	} else if (qp && !sq) {
-		ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
-					       &bytes_copied);
+		ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+					  &bytes_copied);
 		if (ret)
 			goto read_user;
 		ret = mlx5_ib_mr_responder_pfault_handler_rq(
@@ -1253,8 +1253,8 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 	} else if (!qp) {
 		struct mlx5_ib_srq *srq = res_to_srq(res);
 
-		ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
-						&bytes_copied);
+		ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+					   &bytes_copied);
 		if (ret)
 			goto read_user;
 		ret = mlx5_ib_mr_responder_pfault_handler_srq(
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -129,14 +129,10 @@ static int is_sqp(enum ib_qp_type qp_type)
 *
 * Return: zero on success, or an error code.
 */
-static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
-					void *buffer,
-					u32 buflen,
-					int wqe_index,
-					int wq_offset,
-					int wq_wqe_cnt,
-					int wq_wqe_shift,
-					int bcnt,
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
+					size_t buflen, int wqe_index,
+					int wq_offset, int wq_wqe_cnt,
+					int wq_wqe_shift, int bcnt,
 					size_t *bytes_copied)
 {
 	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
@@ -160,11 +156,43 @@ static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem,
 	return 0;
 }
 
-int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
-			     int wqe_index,
-			     void *buffer,
-			     int buflen,
-			     size_t *bc)
+static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+				      void *buffer, size_t buflen, size_t *bc)
+{
+	struct mlx5_wqe_ctrl_seg *ctrl;
+	size_t bytes_copied = 0;
+	size_t wqe_length;
+	void *p;
+	int ds;
+
+	wqe_index = wqe_index & qp->sq.fbc.sz_m1;
+
+	/* read the control segment first */
+	p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+	ctrl = p;
+	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+	wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+	/* read rest of WQE if it spreads over more than one stride */
+	while (bytes_copied < wqe_length) {
+		size_t copy_length =
+			min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
+
+		if (!copy_length)
+			break;
+
+		memcpy(buffer + bytes_copied, p, copy_length);
+		bytes_copied += copy_length;
+
+		wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
+		p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+	}
+	*bc = bytes_copied;
+	return 0;
+}
+
+static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+				    void *buffer, size_t buflen, size_t *bc)
 {
 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
 	struct ib_umem *umem = base->ubuffer.umem;
@@ -176,18 +204,10 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
 	int ret;
 	int ds;
 
-	if (buflen < sizeof(*ctrl))
-		return -EINVAL;
-
 	/* at first read as much as possible */
-	ret = mlx5_ib_read_user_wqe_common(umem,
-					   buffer,
-					   buflen,
-					   wqe_index,
-					   wq->offset,
-					   wq->wqe_cnt,
-					   wq->wqe_shift,
-					   buflen,
+	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+					   wq->offset, wq->wqe_cnt,
+					   wq->wqe_shift, buflen,
 					   &bytes_copied);
 	if (ret)
 		return ret;
@@ -210,13 +230,9 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
 	 * so read the remaining bytes starting
 	 * from wqe_index 0
 	 */
-	ret = mlx5_ib_read_user_wqe_common(umem,
-					   buffer + bytes_copied,
-					   buflen - bytes_copied,
-					   0,
-					   wq->offset,
-					   wq->wqe_cnt,
-					   wq->wqe_shift,
+	ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
+					   buflen - bytes_copied, 0, wq->offset,
+					   wq->wqe_cnt, wq->wqe_shift,
 					   wqe_length - bytes_copied,
 					   &bytes_copied2);
 
@@ -226,11 +242,24 @@ int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp,
 	return 0;
 }
 
-int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
-			     int wqe_index,
-			     void *buffer,
-			     int buflen,
-			     size_t *bc)
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+			size_t buflen, size_t *bc)
+{
+	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+	struct ib_umem *umem = base->ubuffer.umem;
+
+	if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
+		return -EINVAL;
+
+	if (!umem)
+		return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
+						  buflen, bc);
+
+	return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
+				    void *buffer, size_t buflen, size_t *bc)
 {
 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
 	struct ib_umem *umem = base->ubuffer.umem;
@@ -238,14 +267,9 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
 	size_t bytes_copied;
 	int ret;
 
-	ret = mlx5_ib_read_user_wqe_common(umem,
-					   buffer,
-					   buflen,
-					   wqe_index,
-					   wq->offset,
-					   wq->wqe_cnt,
-					   wq->wqe_shift,
-					   buflen,
+	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+					   wq->offset, wq->wqe_cnt,
+					   wq->wqe_shift, buflen,
 					   &bytes_copied);
 
 	if (ret)
@@ -254,25 +278,33 @@ int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp,
 	return 0;
 }
 
-int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
-			      int wqe_index,
-			      void *buffer,
-			      int buflen,
-			      size_t *bc)
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+			size_t buflen, size_t *bc)
+{
+	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+	struct ib_umem *umem = base->ubuffer.umem;
+	struct mlx5_ib_wq *wq = &qp->rq;
+	size_t wqe_size = 1 << wq->wqe_shift;
+
+	if (buflen < wqe_size)
+		return -EINVAL;
+
+	if (!umem)
+		return -EOPNOTSUPP;
+
+	return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+				     void *buffer, size_t buflen, size_t *bc)
 {
 	struct ib_umem *umem = srq->umem;
 	size_t bytes_copied;
 	int ret;
 
-	ret = mlx5_ib_read_user_wqe_common(umem,
-					   buffer,
-					   buflen,
-					   wqe_index,
-					   0,
-					   srq->msrq.max,
-					   srq->msrq.wqe_shift,
-					   buflen,
-					   &bytes_copied);
+	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
+					   srq->msrq.max, srq->msrq.wqe_shift,
+					   buflen, &bytes_copied);
 
 	if (ret)
 		return ret;
@@ -280,6 +312,21 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq,
 	return 0;
 }
 
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+			 size_t buflen, size_t *bc)
+{
+	struct ib_umem *umem = srq->umem;
+	size_t wqe_size = 1 << srq->msrq.wqe_shift;
+
+	if (buflen < wqe_size)
+		return -EINVAL;
+
+	if (!umem)
+		return -EOPNOTSUPP;
+
+	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
+}
+
 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 {
 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;