crypto: chtls - Fixed memory leak

Free work request skbs when the connection terminates.
enqueue_wr()/dequeue_wr() are shared between softirq
and application contexts and should be protected by the
socket lock. Moved dequeue_wr() to the appropriate file.

Signed-off-by: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Vinay Kumar Yadav 2019-12-19 16:21:48 +05:30 committed by Herbert Xu
parent 596d0a2895
commit 93e23eb2ed
3 changed files with 38 additions and 13 deletions

View File

@@ -727,6 +727,14 @@ static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
return 0;
}
/*
 * Free every work-request skb still queued on the socket's WR list.
 * Added by this commit so pending WRs are released at connection
 * teardown instead of being leaked.  NOTE(review): per the commit
 * message, the WR list is shared between softirq and process context,
 * so the caller is expected to hold the socket lock — confirm at each
 * call site.
 */
static void chtls_purge_wr_queue(struct sock *sk)
{
struct sk_buff *skb;
while ((skb = dequeue_wr(sk)) != NULL)
kfree_skb(skb);
}
static void chtls_release_resources(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -741,6 +749,11 @@ static void chtls_release_resources(struct sock *sk)
kfree_skb(csk->txdata_skb_cache);
csk->txdata_skb_cache = NULL;
if (csk->wr_credits != csk->wr_max_credits) {
chtls_purge_wr_queue(sk);
chtls_reset_wr_list(csk);
}
if (csk->l2t_entry) {
cxgb4_l2t_release(csk->l2t_entry);
csk->l2t_entry = NULL;
@@ -1735,6 +1748,7 @@ static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
kfree_skb(skb);
}
static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
@@ -2062,19 +2076,6 @@ rel_skb:
return 0;
}
/*
 * Pop the oldest work-request skb off the connection's WR list, or
 * return NULL if the list is empty.  This static copy is the version
 * removed by this commit; it is relocated to the shared header so
 * other files (e.g. the purge path added above) can call it.
 */
static struct sk_buff *dequeue_wr(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct sk_buff *skb = csk->wr_skb_head;
if (likely(skb)) {
/* Don't bother clearing the tail */
csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
WR_SKB_CB(skb)->next_wr = NULL;
}
return skb;
}
static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
{
struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR;

View File

@@ -185,6 +185,12 @@ static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
/*
 * Reset the connection's work-request list to empty (head and tail
 * both NULL).  Does not free queued skbs — callers free them first
 * (see chtls_purge_wr_queue() in the first hunk of this commit).
 */
static inline void chtls_reset_wr_list(struct chtls_sock *csk)
{
csk->wr_skb_head = NULL;
csk->wr_skb_tail = NULL;
}
static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
{
WR_SKB_CB(skb)->next_wr = NULL;
@@ -197,4 +203,19 @@ static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
csk->wr_skb_tail = skb;
}
/*
 * Remove and return the head of the connection's work-request list,
 * or NULL if the list is empty.  Moved into this header by the commit
 * so it can be shared; per the commit message it is used from both
 * softirq and application context and should be called with the
 * socket lock held.
 *
 * Fix: collapse the redundant "skb = NULL; skb = head;" pair into a
 * single initialized declaration — the NULL store was dead.
 */
static inline struct sk_buff *dequeue_wr(struct sock *sk)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct sk_buff *skb = csk->wr_skb_head;
if (likely(skb)) {
/* Don't bother clearing the tail */
csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
WR_SKB_CB(skb)->next_wr = NULL;
}
return skb;
}
#endif

View File

@@ -376,6 +376,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
kwr->sc_imm.cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
kwr->sc_imm.len = cpu_to_be32(klen);
lock_sock(sk);
/* key info */
kctx = (struct _key_ctx *)(kwr + 1);
ret = chtls_key_info(csk, kctx, keylen, optname, cipher_type);
@@ -414,8 +415,10 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
csk->tlshws.txkey = keyid;
}
release_sock(sk);
return ret;
out_notcb:
release_sock(sk);
free_tls_keyid(sk);
out_nokey:
kfree_skb(skb);