RDMA/rxe: Cleanup error state handling in rxe_comp.c
Cleanup the handling of qp in the error state, reset state and
during rxe_qp_do_cleanup. Make it the same as rxe_resp.c.

Link: https://lore.kernel.org/r/20230304174533.11296-5-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 49dc9c1f0c
commit fbdeb828a2
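The patch makes the completer mirror the shape the responder already uses: a pure packet-drain helper, a per-WQE flush helper that posts an IB_WC_WR_FLUSH_ERR completion, and a queue-flush loop that stops posting completions (but keeps consuming WQEs) once a post fails. A stand-alone sketch of that shape; all names here (toy_cq, toy_wqe, CQ_DEPTH, flush_wqe, flush_queue) are illustrative, not from the driver:

#include <stdbool.h>
#include <stdio.h>

#define CQ_DEPTH 4

struct toy_cq { int used; };	/* completion queue that can fill up */
struct toy_wqe { int wr_id; };	/* pending work queue element */

/* post a flush-error completion; fails when the CQ is full,
 * mirroring rxe_cq_post() returning an error */
static int flush_wqe(struct toy_cq *cq, struct toy_wqe *wqe)
{
	if (cq->used >= CQ_DEPTH) {
		fprintf(stderr, "post cq failed: queue full\n");
		return -1;
	}
	cq->used++;
	printf("wqe %d completed with flush error\n", wqe->wr_id);
	return 0;
}

/* drain the queue; generate flush completions only while 'notify'
 * holds, falling back to silent draining once a post fails */
static void flush_queue(struct toy_cq *cq, struct toy_wqe *q,
			int n, bool notify)
{
	for (int i = 0; i < n; i++) {
		if (notify && flush_wqe(cq, &q[i]))
			notify = false;	/* stop completing, keep draining */
		/* consumer index advances unconditionally, as in the patch */
	}
}

int main(void)
{
	struct toy_cq cq = { .used = 2 };	/* partially full CQ */
	struct toy_wqe q[6] = { {1}, {2}, {3}, {4}, {5}, {6} };

	flush_queue(&cq, q, 6, true);	/* wqes 1-2 complete, rest drain */
	return 0;
}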
drivers/infiniband/sw/rxe/rxe_comp.c

@@ -542,25 +542,60 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 	return COMPST_GET_WQE;
 }
 
-static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
+/* drain incoming response packet queue */
+static void drain_resp_pkts(struct rxe_qp *qp)
 {
 	struct sk_buff *skb;
-	struct rxe_send_wqe *wqe;
-	struct rxe_queue *q = qp->sq.queue;
 
 	while ((skb = skb_dequeue(&qp->resp_pkts))) {
 		rxe_put(qp);
 		kfree_skb(skb);
 		ib_device_put(qp->ibqp.device);
 	}
+}
+
+/* complete send wqe with flush error */
+static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+	struct rxe_cqe cqe = {};
+	struct ib_wc *wc = &cqe.ibwc;
+	struct ib_uverbs_wc *uwc = &cqe.uibwc;
+	int err;
+
+	if (qp->is_user) {
+		uwc->wr_id = wqe->wr.wr_id;
+		uwc->status = IB_WC_WR_FLUSH_ERR;
+		uwc->qp_num = qp->ibqp.qp_num;
+	} else {
+		wc->wr_id = wqe->wr.wr_id;
+		wc->status = IB_WC_WR_FLUSH_ERR;
+		wc->qp = &qp->ibqp;
+	}
+
+	err = rxe_cq_post(qp->scq, &cqe, 0);
+	if (err)
+		rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
+
+	return err;
+}
+
+/* drain and optionally complete the send queue
+ * if unable to complete a wqe, i.e. cq is full, stop
+ * completing and flush the remaining wqes
+ */
+static void flush_send_queue(struct rxe_qp *qp, bool notify)
+{
+	struct rxe_send_wqe *wqe;
+	struct rxe_queue *q = qp->sq.queue;
+	int err;
 
 	while ((wqe = queue_head(q, q->type))) {
 		if (notify) {
-			wqe->status = IB_WC_WR_FLUSH_ERR;
-			do_complete(qp, wqe);
-		} else {
-			queue_advance_consumer(q, q->type);
+			err = flush_send_wqe(qp, wqe);
+			if (err)
+				notify = 0;
 		}
+		queue_advance_consumer(q, q->type);
 	}
 }
 
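flush_send_wqe() fills in whichever completion format the CQ's consumer expects; both views share the same buffer. For reference, the driver's struct rxe_cqe (rxe_verbs.h) is essentially a union of the two:

struct rxe_cqe {
	union {
		struct ib_wc		ibwc;	/* in-kernel consumers */
		struct ib_uverbs_wc	uibwc;	/* user-space consumers */
	};
};

This is why the helper can build either an ib_wc or an ib_uverbs_wc in place and hand the same cqe to rxe_cq_post().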
@@ -589,8 +624,10 @@ int rxe_completer(struct rxe_qp *qp)
 
 	if (!qp->valid || qp->comp.state == QP_STATE_ERROR ||
 	    qp->comp.state == QP_STATE_RESET) {
-		rxe_drain_resp_pkts(qp, qp->valid &&
-				    qp->comp.state == QP_STATE_ERROR);
+		bool notify = qp->valid &&
+			(qp->comp.state == QP_STATE_ERROR);
+		drain_resp_pkts(qp);
+		flush_send_queue(qp, notify);
 		goto exit;
 	}
 
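For context, this entry path implements the behavior a verbs consumer relies on: once a QP reaches the error state, outstanding work requests must surface as flush-error completions rather than silently disappear (a reset, by contrast, drains without notifying). An illustrative user-space sketch with libibverbs, not part of the patch; drain_after_error is a hypothetical name and error handling is elided:

#include <infiniband/verbs.h>
#include <stdio.h>

static void drain_after_error(struct ibv_qp *qp, struct ibv_cq *cq)
{
	struct ibv_qp_attr attr = { .qp_state = IBV_QPS_ERR };
	struct ibv_wc wc;

	/* move the QP to the error state; pending WRs are completed
	 * in error instead of being executed */
	if (ibv_modify_qp(qp, &attr, IBV_QP_STATE))
		return;

	/* each flushed WQE surfaces as a flush-error completion;
	 * a real application would retry/poll until the count is met */
	while (ibv_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status == IBV_WC_WR_FLUSH_ERR)
			printf("wr_id %llu flushed\n",
			       (unsigned long long)wc.wr_id);
	}
}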
drivers/infiniband/sw/rxe/rxe_cq.c

@@ -114,6 +114,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 
 	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
 	if (unlikely(full)) {
+		rxe_err_cq(cq, "queue full");
 		spin_unlock_irqrestore(&cq->cq_lock, flags);
 		if (cq->ibcq.event_handler) {
 			ev.device = cq->ibcq.device;
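The one-line addition makes CQ overflow visible in the driver log; the surrounding code already dispatches an error event to the consumer and fails the post. A kernel consumer would see the overflow through the event handler it registered when creating the CQ, roughly as below. This is an illustrative sketch, not from the patch: my_cq_event_handler is a hypothetical name, and the assumption that the event arrives as IB_EVENT_CQ_ERR follows from the event-handler call in the hunk above:

#include <rdma/ib_verbs.h>

/* hypothetical handler a kernel ULP might pass to ib_create_cq();
 * invoked by rxe_cq_post() when the CQ is full */
static void my_cq_event_handler(struct ib_event *ev, void *context)
{
	if (ev->event == IB_EVENT_CQ_ERR)
		pr_err("CQ overflow on device %s\n", ev->device->name);
}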
drivers/infiniband/sw/rxe/rxe_resp.c

@@ -1396,7 +1396,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
 }
 
 /* drain incoming request packet queue */
-static void rxe_drain_req_pkts(struct rxe_qp *qp)
+static void drain_req_pkts(struct rxe_qp *qp)
 {
 	struct sk_buff *skb;
 
@@ -1408,33 +1408,35 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp)
 }
 
 /* complete receive wqe with flush error */
-static int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
+static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
 {
 	struct rxe_cqe cqe = {};
 	struct ib_wc *wc = &cqe.ibwc;
 	struct ib_uverbs_wc *uwc = &cqe.uibwc;
+	int err;
 
 	if (qp->rcq->is_user) {
+		uwc->wr_id = wqe->wr_id;
 		uwc->status = IB_WC_WR_FLUSH_ERR;
 		uwc->qp_num = qp_num(qp);
-		uwc->wr_id = wqe->wr_id;
 	} else {
+		wc->wr_id = wqe->wr_id;
 		wc->status = IB_WC_WR_FLUSH_ERR;
 		wc->qp = &qp->ibqp;
-		wc->wr_id = wqe->wr_id;
 	}
 
-	if (rxe_cq_post(qp->rcq, &cqe, 0))
-		return -ENOMEM;
+	err = rxe_cq_post(qp->rcq, &cqe, 0);
+	if (err)
+		rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
 
-	return 0;
+	return err;
 }
 
 /* drain and optionally complete the receive queue
  * if unable to complete a wqe stop completing and
  * just flush the remaining wqes
  */
-static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
+static void flush_recv_queue(struct rxe_qp *qp, bool notify)
 {
 	struct rxe_queue *q = qp->rq.queue;
 	struct rxe_recv_wqe *wqe;
@@ -1445,11 +1447,9 @@ static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
 
 	while ((wqe = queue_head(q, q->type))) {
 		if (notify) {
-			err = complete_flush(qp, wqe);
-			if (err) {
-				rxe_dbg_qp(qp, "complete failed for recv wqe");
+			err = flush_recv_wqe(qp, wqe);
+			if (err)
 				notify = 0;
-			}
 		}
 		queue_advance_consumer(q, q->type);
 	}
@@ -1471,8 +1471,8 @@ int rxe_responder(struct rxe_qp *qp)
 	    qp->resp.state == QP_STATE_RESET) {
 		bool notify = qp->valid &&
 			(qp->resp.state == QP_STATE_ERROR);
-		rxe_drain_req_pkts(qp);
-		rxe_drain_recv_queue(qp, notify);
+		drain_req_pkts(qp);
+		flush_recv_queue(qp, notify);
 		goto exit;
 	}