commit 349d43127d
net/smc: fix kernel panic caused by race of smc_sock
A crash occurs when smc_cdc_tx_handler() tries to access smc_sock
but smc_release() has already freed it.
[ 4570.695099] BUG: unable to handle page fault for address: 000000002eae9e88
[ 4570.696048] #PF: supervisor write access in kernel mode
[ 4570.696728] #PF: error_code(0x0002) - not-present page
[ 4570.697401] PGD 0 P4D 0
[ 4570.697716] Oops: 0002 [#1] PREEMPT SMP NOPTI
[ 4570.698228] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.16.0-rc4+ #111
[ 4570.699013] Hardware name: Alibaba Cloud Alibaba Cloud ECS, BIOS 8c24b4c 04/0
[ 4570.699933] RIP: 0010:_raw_spin_lock+0x1a/0x30
<...>
[ 4570.711446] Call Trace:
[ 4570.711746] <IRQ>
[ 4570.711992] smc_cdc_tx_handler+0x41/0xc0
[ 4570.712470] smc_wr_tx_tasklet_fn+0x213/0x560
[ 4570.712981] ? smc_cdc_tx_dismisser+0x10/0x10
[ 4570.713489] tasklet_action_common.isra.17+0x66/0x140
[ 4570.714083] __do_softirq+0x123/0x2f4
[ 4570.714521] irq_exit_rcu+0xc4/0xf0
[ 4570.714934] common_interrupt+0xba/0xe0
Though smc_cdc_tx_handler() checks for the existence of the smc
connection, smc_release() may already have dismissed and released the
smc socket before smc_cdc_tx_handler() accesses it:
smc_cdc_tx_handler()           |smc_release()
if (!conn)                     |
                               |
                               |smc_cdc_tx_dismiss_slots()
                               |      smc_cdc_tx_dismisser()
                               |
                               |sock_put(&smc->sk) <- last sock_put,
                               |                      smc_sock freed
bh_lock_sock(&smc->sk) (panic) |
To make sure we won't receive any CDC messages after we free the
smc_sock, add a refcount on the smc_connection for each inflight CDC
message (posted to the QP but with no related CQE received yet), and
don't release the smc_connection until all inflight CDC messages have
completed, whether they succeeded or failed.
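A minimal sketch of the scheme, assuming a per-connection atomic
counter and wait queue named cdc_pend_tx_wr/cdc_pend_tx_wq (the helper
and field names here are illustrative, not necessarily the exact ones
in the patch):

	static inline void smc_cdc_get_pend_tx_wr(struct smc_connection *conn)
	{
		atomic_inc(&conn->cdc_pend_tx_wr);	/* before posting the WR */
	}

	static inline void smc_cdc_put_pend_tx_wr(struct smc_connection *conn)
	{
		/* called from the CQE handler, for good and bad completions */
		if (atomic_dec_and_test(&conn->cdc_pend_tx_wr))
			wake_up(&conn->cdc_pend_tx_wq);
	}

Teardown then blocks until the counter drops to zero before the
connection memory may go away.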
Using a refcount on CDC messages brings another problem: when the link
is going to be destroyed, smcr_link_clear() will reset the QP, which
then removes all pending CQEs related to the QP from the CQ. To make
sure all CQEs always come back, so the refcount on the smc_connection
can always reach 0, smc_ib_modify_qp_reset() is replaced by
smc_ib_modify_qp_error(): the error state flushes outstanding work
requests back as (error) completions instead of discarding them.
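For reference, moving a QP into the error state is a small verbs call;
a plausible shape for smc_ib_modify_qp_error(), assuming the link keeps
its QP in lnk->roce_qp as elsewhere in net/smc (needs
<rdma/ib_verbs.h>):

	int smc_ib_modify_qp_error(struct smc_link *lnk)
	{
		struct ib_qp_attr qp_attr = {};

		/* IB_QPS_ERR flushes outstanding WRs back as error CQEs
		 * rather than silently dropping them like a reset would
		 */
		qp_attr.qp_state = IB_QPS_ERR;
		return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
	}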
Also remove the timeout in smc_wr_tx_wait_no_pending_sends(), since we
need to wait for all pending WQEs to complete, or we may encounter a
use-after-free when handling CQEs.
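Conceptually this turns a bounded wait into an unbounded one; a sketch,
where the pending-test helper and the timeout constant are assumptions:

	/* before: gave up after SMC_WR_TX_WAIT_PENDING_TIME, so late CQEs
	 * could race against memory that had already been freed
	 */
	wait_event_timeout(link->wr_tx_wait, !smc_wr_tx_has_pending(link),
			   SMC_WR_TX_WAIT_PENDING_TIME);

	/* after: block until every pending WQE has completed */
	wait_event(link->wr_tx_wait, !smc_wr_tx_has_pending(link));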
For the IB device removal routine, we need to wait for all the QPs on
that device to be destroyed before we can destroy the CQs on the
device; otherwise the refcount on the smc_connection won't reach 0 and
the smc_sock cannot be released.
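A sketch of that ordering in the removal path; the device-level counter
and wait queue used here (lnk_cnt, lnks_deleted) follow net/smc naming
conventions but are assumptions about struct smc_ib_device:

	/* terminate all link groups on the device; this destroys the QPs
	 * and lets their remaining CQEs drain back through the refcounts
	 */
	smc_smcr_terminate_all(smcibdev);
	wait_event(smcibdev->lnks_deleted, !atomic_read(&smcibdev->lnk_cnt));
	/* only now is it safe to destroy the completion queues */
	smc_ib_cleanup_per_ibdev(smcibdev);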
Fixes: 5f08318f61 ("smc: connection data control (CDC)")
Reported-by: Wen Gu <guwen@linux.alibaba.com>
Signed-off-by: Dust Li <dust.li@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef SMC_CDC_H
#define SMC_CDC_H

#include <linux/kernel.h> /* max_t */
#include <linux/atomic.h>
#include <linux/in.h>
#include <linux/compiler.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_wr.h"

#define SMC_CDC_MSG_TYPE		0xFE

/* in network byte order */
union smc_cdc_cursor {		/* SMC cursor */
	struct {
		__be16	reserved;
		__be16	wrap;
		__be32	count;
	};
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t	acurs;	/* for atomic processing */
#else
	u64		acurs;	/* for atomic processing */
#endif
} __aligned(8);

/* in network byte order */
struct smc_cdc_msg {
	struct smc_wr_rx_hdr		common;	/* .type = 0xFE */
	u8				len;	/* 44 */
	__be16				seqno;
	__be32				token;
	union smc_cdc_cursor		prod;
	union smc_cdc_cursor		cons;	/* piggy backed "ack" */
	struct smc_cdc_producer_flags	prod_flags;
	struct smc_cdc_conn_state_flags	conn_state_flags;
	u8				reserved[18];
};

/* SMC-D cursor format */
union smcd_cdc_cursor {
	struct {
		u16	wrap;
		u32	count;
		struct smc_cdc_producer_flags	prod_flags;
		struct smc_cdc_conn_state_flags	conn_state_flags;
	} __packed;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t	acurs;	/* for atomic processing */
#else
	u64		acurs;	/* for atomic processing */
#endif
} __aligned(8);

/* CDC message for SMC-D */
struct smcd_cdc_msg {
	struct smc_wr_rx_hdr	common;	/* Type = 0xFE */
	u8			res1[7];
	union smcd_cdc_cursor	prod;
	union smcd_cdc_cursor	cons;
	u8			res3[8];
} __aligned(8);

static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
{
	return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
	       conn->local_rx_ctrl.conn_state_flags.peer_conn_closed;
}

static inline bool smc_cdc_rxed_any_close_or_senddone(
	struct smc_connection *conn)
{
	return smc_cdc_rxed_any_close(conn) ||
	       conn->local_rx_ctrl.conn_state_flags.peer_done_writing;
}

static inline void smc_curs_add(int size, union smc_host_cursor *curs,
				int value)
{
	curs->count += value;
	if (curs->count >= size) {
		curs->wrap++;
		curs->count -= size;
	}
}
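/* Example: with size = 16 and curs = {wrap 0, count 14},
 * smc_curs_add(16, &curs, 5) leaves {wrap 1, count 3}: the cursor
 * passed the end of the 16-byte ring exactly once.
 */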
/* Copy cursor src into tgt */
static inline void smc_curs_copy(union smc_host_cursor *tgt,
				 union smc_host_cursor *src,
				 struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
				     union smc_cdc_cursor *src,
				     struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
				  union smcd_cdc_cursor *src,
				  struct smc_connection *conn)
{
#ifndef KERNEL_HAS_ATOMIC64
	unsigned long flags;

	spin_lock_irqsave(&conn->acurs_lock, flags);
	tgt->acurs = src->acurs;
	spin_unlock_irqrestore(&conn->acurs_lock, flags);
#else
	atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
#endif
}

/* calculate cursor difference between old and new, where old <= new and
 * difference cannot exceed size
 */
static inline int smc_curs_diff(unsigned int size,
				union smc_host_cursor *old,
				union smc_host_cursor *new)
{
	if (old->wrap != new->wrap)
		return max_t(int, 0,
			     ((size - old->count) + new->count));

	return max_t(int, 0, (new->count - old->count));
}
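/* Example: with size = 16, old = {wrap 0, count 14} and
 * new = {wrap 1, count 3}, smc_curs_diff() takes the wrap branch and
 * returns (16 - 14) + 3 = 5 bytes between the two cursors.
 */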
/* calculate cursor difference between old and new - returns negative
 * value in case old > new
 */
static inline int smc_curs_comp(unsigned int size,
				union smc_host_cursor *old,
				union smc_host_cursor *new)
{
	if (old->wrap > new->wrap ||
	    (old->wrap == new->wrap && old->count > new->count))
		return -smc_curs_diff(size, new, old);
	return smc_curs_diff(size, old, new);
}

/* calculate cursor difference between old and new, where old <= new and
 * difference may exceed size
 */
static inline int smc_curs_diff_large(unsigned int size,
				      union smc_host_cursor *old,
				      union smc_host_cursor *new)
{
	if (old->wrap < new->wrap)
		return min_t(int,
			     (size - old->count) + new->count +
			     (new->wrap - old->wrap - 1) * size,
			     size);

	if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
		return min_t(int,
			     (size - old->count) + new->count +
			     (new->wrap + 0xffff - old->wrap) * size,
			     size);

	return max_t(int, 0, (new->count - old->count));
}
static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
					  union smc_host_cursor *local,
					  union smc_host_cursor *save,
					  struct smc_connection *conn)
{
	smc_curs_copy(save, local, conn);
	peer->count = htonl(save->count);
	peer->wrap = htons(save->wrap);
	/* peer->reserved = htons(0); must be ensured by caller */
}

static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
				       struct smc_connection *conn,
				       union smc_host_cursor *save)
{
	struct smc_host_cdc_msg *local = &conn->local_tx_ctrl;

	peer->common.type = local->common.type;
	peer->len = local->len;
	peer->seqno = htons(local->seqno);
	peer->token = htonl(local->token);
	smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
	smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
	peer->prod_flags = local->prod_flags;
	peer->conn_state_flags = local->conn_state_flags;
}

static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
					  union smc_cdc_cursor *peer,
					  struct smc_connection *conn)
{
	union smc_host_cursor temp, old;
	union smc_cdc_cursor net;

	smc_curs_copy(&old, local, conn);
	smc_curs_copy_net(&net, peer, conn);
	temp.count = ntohl(net.count);
	temp.wrap = ntohs(net.wrap);
	if ((old.wrap > temp.wrap) && temp.wrap)
		return;
	if ((old.wrap == temp.wrap) &&
	    (old.count > temp.count))
		return;
	smc_curs_copy(local, &temp, conn);
}

static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
					struct smc_cdc_msg *peer,
					struct smc_connection *conn)
{
	local->common.type = peer->common.type;
	local->len = peer->len;
	local->seqno = ntohs(peer->seqno);
	local->token = ntohl(peer->token);
	smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn);
	smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn);
	local->prod_flags = peer->prod_flags;
	local->conn_state_flags = peer->conn_state_flags;
}

static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
					struct smcd_cdc_msg *peer,
					struct smc_connection *conn)
{
	union smc_host_cursor temp;

	temp.wrap = peer->prod.wrap;
	temp.count = peer->prod.count;
	smc_curs_copy(&local->prod, &temp, conn);

	temp.wrap = peer->cons.wrap;
	temp.count = peer->cons.count;
	smc_curs_copy(&local->cons, &temp, conn);
	local->prod_flags = peer->cons.prod_flags;
	local->conn_state_flags = peer->cons.conn_state_flags;
}

static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
				       struct smc_cdc_msg *peer,
				       struct smc_connection *conn)
{
	if (conn->lgr->is_smcd)
		smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer, conn);
	else
		smcr_cdc_msg_to_host(local, peer, conn);
}

struct smc_cdc_tx_pend {
	struct smc_connection	*conn;		/* socket connection */
	union smc_host_cursor	cursor;		/* tx sndbuf cursor sent */
	union smc_host_cursor	p_cursor;	/* rx RMBE cursor produced */
	u16			ctrl_seq;	/* conn. tx sequence # */
};

int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_link *link,
			  struct smc_wr_buf **wr_buf,
			  struct smc_rdma_wr **wr_rdma_buf,
			  struct smc_cdc_tx_pend **pend);
void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
int smcd_cdc_msg_send(struct smc_connection *conn);
int smcr_cdc_msg_send_validation(struct smc_connection *conn,
				 struct smc_cdc_tx_pend *pend,
				 struct smc_wr_buf *wr_buf);
int smc_cdc_init(void) __init;
void smcd_cdc_rx_init(struct smc_connection *conn);

#endif /* SMC_CDC_H */
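The smc_cdc_wait_pend_tx_wr() declared above is the drain point the fix
relies on; a hedged sketch of a teardown call site (the surrounding
function is illustrative, not the exact smc_core.c code):

	static void smc_conn_teardown_sketch(struct smc_connection *conn)
	{
		/* block until the CQE of every posted CDC message has come
		 * back, so no tx tasklet can touch conn afterwards
		 */
		smc_cdc_wait_pend_tx_wr(conn);
		/* from here it is safe to free conn and its smc_sock */
	}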