net/smc: fix wait on already cleared link

There can be a race between the waiters for a tx work request buffer
and the link-down processing that finally clears the link. Although
all waiters are woken up before the link is cleared, some of them may
not yet have regained control and are still waiting. This results in
an access to a cleared wait queue head.

Fix this by introducing atomic reference counting around the wait calls,
and make the link-clear processing wait until all waiters have finished
(a userspace sketch of this pattern follows the commit metadata below).
Move the work-request-layer calls into smc_wr.c and set the link state
to INACTIVE before calling smcr_link_clear() in smc_llc_srv_add_link().

Fixes: 15e1b99aad ("net/smc: no WR buffer wait for terminating link group")
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Guvenc Gulce <guvenc@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Commit: 8f3d65c166 (parent: acc68b8d2a)
Author: Karsten Graul, 2021-08-09 11:05:56 +02:00, committed by David S. Miller
4 changed files with 33 additions and 7 deletions
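
The core of the fix is a drain pattern: every waiter pins the link with a reference before sleeping, the last waiter to leave wakes the teardown path, and teardown first wakes all sleepers and then waits for the reference count to reach zero before the wait queue head goes away. What follows is a minimal userspace sketch of that pattern, not kernel code: struct fake_link, link_send() and link_teardown() are invented names, and C11 atomics plus a pthread condition variable stand in for the kernel's atomic_t and wait_queue_head_t.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct fake_link {
	atomic_int	wr_tx_refcnt;	/* waiters currently inside the link */
	atomic_bool	usable;		/* cleared by link-down processing */
	pthread_mutex_t	lock;
	pthread_cond_t	wr_tx_wait;	/* stands in for the wait queue head */
};

/* sender side: hold a reference across the whole send/wait */
static int link_send(struct fake_link *lnk)
{
	int rc = -1;	/* the kernel wrapper returns -ENOLINK */

	atomic_fetch_add(&lnk->wr_tx_refcnt, 1);
	if (atomic_load(&lnk->usable))
		rc = 0;	/* ... wait for a free WR buffer and transmit ... */
	/* last waiter out wakes anyone draining the refcount */
	if (atomic_fetch_sub(&lnk->wr_tx_refcnt, 1) == 1) {
		pthread_mutex_lock(&lnk->lock);
		pthread_cond_broadcast(&lnk->wr_tx_wait);
		pthread_mutex_unlock(&lnk->lock);
	}
	return rc;
}

/* teardown side: wake all sleepers first, then drain the references */
static void link_teardown(struct fake_link *lnk)
{
	atomic_store(&lnk->usable, false);
	pthread_mutex_lock(&lnk->lock);
	pthread_cond_broadcast(&lnk->wr_tx_wait);	/* kick sleepers */
	while (atomic_load(&lnk->wr_tx_refcnt))		/* drain refs */
		pthread_cond_wait(&lnk->wr_tx_wait, &lnk->lock);
	pthread_mutex_unlock(&lnk->lock);
	/* from here on, no waiter is inside the wait machinery anymore */
}

static void *sender(void *arg)
{
	link_send(arg);
	return NULL;
}

int main(void)
{
	struct fake_link lnk = {
		.usable		= true,
		.lock		= PTHREAD_MUTEX_INITIALIZER,
		.wr_tx_wait	= PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, sender, &lnk);
	link_teardown(&lnk);	/* returns only after the sender dropped its ref */
	pthread_join(t, NULL);
	pthread_cond_destroy(&lnk.wr_tx_wait);	/* safe: the sender has exited */
	pthread_mutex_destroy(&lnk.lock);
	return 0;
}

The ordering mirrors the patch: smc_wr_free_link() first wakes both wait queues, then blocks until wr_reg_refcnt and wr_tx_refcnt drop to zero, so no waiter can still be touching a wait queue head that is about to disappear.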

diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h

@@ -97,6 +97,7 @@ struct smc_link {
 	unsigned long		*wr_tx_mask;	/* bit mask of used indexes */
 	u32			wr_tx_cnt;	/* number of WR send buffers */
 	wait_queue_head_t	wr_tx_wait;	/* wait for free WR send buf */
+	atomic_t		wr_tx_refcnt;	/* tx refs to link */
 
 	struct smc_wr_buf	*wr_rx_bufs;	/* WR recv payload buffers */
 	struct ib_recv_wr	*wr_rx_ibs;	/* WR recv meta data */
@@ -109,6 +110,7 @@ struct smc_link {
 	struct ib_reg_wr	wr_reg;		/* WR register memory region */
 	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */
+	atomic_t		wr_reg_refcnt;	/* reg refs to link */
 	enum smc_wr_reg_state	wr_reg_state;	/* state of wr_reg request */
 	u8			gid[SMC_GID_SIZE];/* gid matching used vlan id*/

diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c

@@ -888,6 +888,7 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
 	if (!rc)
 		goto out;
 out_clear_lnk:
+	lnk_new->state = SMC_LNK_INACTIVE;
 	smcr_link_clear(lnk_new, false);
 out_reject:
 	smc_llc_cli_add_link_reject(qentry);
@@ -1184,6 +1185,7 @@ int smc_llc_srv_add_link(struct smc_link *link)
 		goto out_err;
 	return 0;
 out_err:
+	link_new->state = SMC_LNK_INACTIVE;
 	smcr_link_clear(link_new, false);
 	return rc;
 }
@@ -1286,10 +1288,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
 	del_llc->reason = 0;
 	smc_llc_send_message(lnk, &qentry->msg); /* response */
 
-	if (smc_link_downing(&lnk_del->state)) {
-		if (smc_switch_conns(lgr, lnk_del, false))
-			smc_wr_tx_wait_no_pending_sends(lnk_del);
-	}
+	if (smc_link_downing(&lnk_del->state))
+		smc_switch_conns(lgr, lnk_del, false);
 	smcr_link_clear(lnk_del, true);
 
 	active_links = smc_llc_active_link_count(lgr);
@@ -1805,8 +1805,6 @@ void smc_llc_link_clear(struct smc_link *link, bool log)
 			    link->smcibdev->ibdev->name, link->ibport);
 	complete(&link->llc_testlink_resp);
 	cancel_delayed_work_sync(&link->llc_testlink_wrk);
-	smc_wr_wakeup_reg_wait(link);
-	smc_wr_wakeup_tx_wait(link);
 }
 
 /* register a new rtoken at the remote peer (for all links) */

diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c

@@ -496,7 +496,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
 /* Wakeup sndbuf consumers from any context (IRQ or process)
  * since there is more data to transmit; usable snd_wnd as max transmit
  */
-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
 	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
 	struct smc_link *link = conn->lnk;
@@ -550,6 +550,22 @@ out_unlock:
 	return rc;
 }
 
+static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+	struct smc_link *link = conn->lnk;
+	int rc = -ENOLINK;
+
+	if (!link)
+		return rc;
+
+	atomic_inc(&link->wr_tx_refcnt);
+	if (smc_link_usable(link))
+		rc = _smcr_tx_sndbuf_nonempty(conn);
+	if (atomic_dec_and_test(&link->wr_tx_refcnt))
+		wake_up_all(&link->wr_tx_wait);
+	return rc;
+}
+
 static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
 	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
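
The wrapper above is the design choice worth noting: rather than sprinkling refcount operations through the send path, the patch renames the worker to _smcr_tx_sndbuf_nonempty() and funnels every caller through one guarded entry point, so no tx path can reach the link without holding a reference. Isolated from the kernel, and with tiny_link, do_send() and wake_waiters() as invented stand-ins, the wrapper's shape is:

#include <stdatomic.h>

struct tiny_link {
	atomic_int	refcnt;		/* plays the role of wr_tx_refcnt */
	atomic_bool	usable;		/* false once the link goes down */
};

int do_send(struct tiny_link *lnk);		/* stand-in for _smcr_tx_sndbuf_nonempty() */
void wake_waiters(struct tiny_link *lnk);	/* stand-in for wake_up_all() */

int guarded_send(struct tiny_link *lnk)
{
	int rc = -1;				/* the kernel wrapper returns -ENOLINK */

	if (!lnk)
		return rc;
	atomic_fetch_add(&lnk->refcnt, 1);	/* pin: teardown must now wait for us */
	if (atomic_load(&lnk->usable))		/* stand-in for smc_link_usable() */
		rc = do_send(lnk);
	if (atomic_fetch_sub(&lnk->refcnt, 1) == 1)
		wake_waiters(lnk);		/* last reference: release teardown */
	return rc;
}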

diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c

@@ -322,9 +322,12 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
 	if (rc)
 		return rc;
 
+	atomic_inc(&link->wr_reg_refcnt);
 	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
 					      (link->wr_reg_state != POSTED),
 					      SMC_WR_REG_MR_WAIT_TIME);
+	if (atomic_dec_and_test(&link->wr_reg_refcnt))
+		wake_up_all(&link->wr_reg_wait);
 	if (!rc) {
 		/* timeout - terminate link */
 		smcr_link_down_cond_sched(link);
@@ -566,10 +569,15 @@ void smc_wr_free_link(struct smc_link *lnk)
 		return;
 	ibdev = lnk->smcibdev->ibdev;
 
+	smc_wr_wakeup_reg_wait(lnk);
+	smc_wr_wakeup_tx_wait(lnk);
+
 	if (smc_wr_tx_wait_no_pending_sends(lnk))
 		memset(lnk->wr_tx_mask, 0,
 		       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
		       sizeof(*lnk->wr_tx_mask));
+	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
+	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
 
 	if (lnk->wr_rx_dma_addr) {
 		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@@ -728,7 +736,9 @@ int smc_wr_create_link(struct smc_link *lnk)
 	memset(lnk->wr_tx_mask, 0,
 	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
 	init_waitqueue_head(&lnk->wr_tx_wait);
+	atomic_set(&lnk->wr_tx_refcnt, 0);
 	init_waitqueue_head(&lnk->wr_reg_wait);
+	atomic_set(&lnk->wr_reg_refcnt, 0);
 	return rc;
 dma_unmap: