net: tcp: add skb drop reasons to tcp_v{4,6}_inbound_md5_hash()

Pass the address of the drop reason to tcp_v4_inbound_md5_hash() and
tcp_v6_inbound_md5_hash() to store the reason for the skb drop when
these functions fail. The drop reason can then be passed to
kfree_skb_reason() when the skb needs to be freed.

The following drop reasons are added:

SKB_DROP_REASON_TCP_MD5NOTFOUND
SKB_DROP_REASON_TCP_MD5UNEXPECTED
SKB_DROP_REASON_TCP_MD5FAILURE

SKB_DROP_REASON_TCP_MD5* above correspond to LINUX_MIB_TCPMD5*

Reviewed-by: Mengen Sun <mengensun@tencent.com>
Reviewed-by: Hao Peng <flyingpeng@tencent.com>
Signed-off-by: Menglong Dong <imagedong@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Menglong Dong 2022-02-20 15:06:32 +08:00 committed by David S. Miller
parent c0e3154d9c
commit 643b622b51
4 changed files with 33 additions and 7 deletions

View File

@ -346,6 +346,18 @@ enum skb_drop_reason {
* udp packet drop out of
* udp_memory_allocated.
*/
SKB_DROP_REASON_TCP_MD5NOTFOUND, /* no MD5 hash and one
* expected, corresponding
* to LINUX_MIB_TCPMD5NOTFOUND
*/
SKB_DROP_REASON_TCP_MD5UNEXPECTED, /* MD5 hash and we're not
* expecting one, corresponding
* to LINUX_MIB_TCPMD5UNEXPECTED
*/
SKB_DROP_REASON_TCP_MD5FAILURE, /* MD5 hash and its wrong,
* corresponding to
* LINUX_MIB_TCPMD5FAILURE
*/
SKB_DROP_REASON_MAX,
};

View File

@ -27,6 +27,10 @@
EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO) \
EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF) \
EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM) \
EM(SKB_DROP_REASON_TCP_MD5NOTFOUND, TCP_MD5NOTFOUND) \
EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED, \
TCP_MD5UNEXPECTED) \
EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \
EMe(SKB_DROP_REASON_MAX, MAX)
#undef EM

View File

@ -1412,7 +1412,8 @@ EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
const struct sk_buff *skb,
int dif, int sdif)
int dif, int sdif,
enum skb_drop_reason *reason)
{
#ifdef CONFIG_TCP_MD5SIG
/*
@ -1445,11 +1446,13 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
return false;
if (hash_expected && !hash_location) {
*reason = SKB_DROP_REASON_TCP_MD5NOTFOUND;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return true;
}
if (!hash_expected && hash_location) {
*reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return true;
}
@ -1462,6 +1465,7 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
*reason = SKB_DROP_REASON_TCP_MD5FAILURE;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
&iph->saddr, ntohs(th->source),
@ -1971,13 +1975,13 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
int tcp_v4_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
enum skb_drop_reason drop_reason;
int sdif = inet_sdif(skb);
int dif = inet_iif(skb);
const struct iphdr *iph;
const struct tcphdr *th;
bool refcounted;
struct sock *sk;
int drop_reason;
int ret;
drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
@ -2025,7 +2029,8 @@ process:
struct sock *nsk;
sk = req->rsk_listener;
if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif,
&drop_reason))) {
sk_drops_add(sk, skb);
reqsk_put(req);
goto discard_it;
@ -2099,7 +2104,7 @@ process:
goto discard_and_relse;
}
if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))
goto discard_and_relse;
nf_reset_ct(skb);

View File

@ -775,7 +775,8 @@ clear_hash_noput:
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
const struct sk_buff *skb,
int dif, int sdif)
int dif, int sdif,
enum skb_drop_reason *reason)
{
#ifdef CONFIG_TCP_MD5SIG
const __u8 *hash_location = NULL;
@ -798,11 +799,13 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
return false;
if (hash_expected && !hash_location) {
*reason = SKB_DROP_REASON_TCP_MD5NOTFOUND;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return true;
}
if (!hash_expected && hash_location) {
*reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return true;
}
@ -813,6 +816,7 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
*reason = SKB_DROP_REASON_TCP_MD5FAILURE;
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
genhash ? "failed" : "mismatch",
@ -1681,7 +1685,8 @@ process:
struct sock *nsk;
sk = req->rsk_listener;
if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif,
&drop_reason)) {
sk_drops_add(sk, skb);
reqsk_put(req);
goto discard_it;
@ -1752,7 +1757,7 @@ process:
goto discard_and_relse;
}
if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))
goto discard_and_relse;
if (tcp_filter(sk, skb)) {