// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_wrapper.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

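/* Zone tables are keyed by (netns, zone), so identical zone ids in
 * different network namespaces do not share a flow table.
 */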
struct zones_ht_key {
	struct net *net;
	u16 zone;
};

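/* One nf flow table is created per zone and shared, via the refcount,
 * by every ct action instance using that zone; it is freed through
 * rcu_work once the last reference is dropped.
 */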
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	struct zones_ht_key key;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, key),
	.key_len = offsetofend(struct zones_ht_key, zone),
	.automatic_shrinking = true,
};

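/* Grab the next free slot in the preallocated flow_action array; the
 * caller is responsible for not exceeding the array size.
 */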
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following nat helper functions check if the inverted reverse tuple
|
|
|
|
* (target) is different then the current dir tuple - meaning nat for ports
|
|
|
|
* and/or ip is needed, and add the relevant mangle actions.
|
|
|
|
*/
|
|
|
|
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

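/* Fill a CT_METADATA entry (mark, labels, cookie and direction) so that
 * drivers can restore the conntrack state on packets they offloaded.
 */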
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      enum ip_conntrack_info ctinfo,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

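/* Build the offload rule for one direction of the flow: optional NAT
 * mangle entries first, then the CT metadata entry.
 */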
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			 IP_CT_ESTABLISHED : IP_CT_NEW;
		if (ctinfo == IP_CT_ESTABLISHED)
			set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		ctinfo = IP_CT_ESTABLISHED_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

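/* Treat a flow that was offloaded before its connection became
 * established as outdated once a reply has been seen, so the gc removes
 * it and it can be re-offloaded with up-to-date state.
 */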
static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
{
	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
	       test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
	       !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
	       !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_get(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_get_ref(ct_ft);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_put(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_put(ct_ft);
}

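/* Flow table type used for the TC datapath; .gc reclaims outdated
 * entries, while .get/.put take and drop references on the per-zone
 * table.
 */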
static struct nf_flowtable_type flowtable_ct = {
	.gc = tcf_ct_flow_is_outdated,
	.action = tcf_ct_flow_table_fill_actions,
	.get = tcf_ct_nf_get,
	.put = tcf_ct_nf_put,
	.owner = THIS_MODULE,
};

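/* Control path: serialized by zones_mutex, so the sleeping GFP_KERNEL
 * allocations here (including the rhashtable_init() inside
 * nf_flow_table_init()) are safe.
 */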
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
	struct zones_ht_key key = { .net = net, .zone = params->zone };
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->key = key;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;
	write_pnet(&ct_ft->nf_ft.net, net);

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
{
	refcount_inc(&ct_ft->ref);
}

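/* Deferred via queue_rcu_work(), so this runs after a grace period and
 * no datapath user can still reference the table being freed.
 */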
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	WARN_ON(!list_empty(&block->cb_list));
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

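/* May run from an RCU callback in BH context, which is why a refcount
 * is used here rather than taking zones_mutex.
 */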
static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
{
	if (refcount_dec_and_test(&ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
{
	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}

static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
{
	struct nf_conn_act_ct_ext *act_ct_ext;

	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}
}

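/* Offload a single connection; IPS_OFFLOAD_BIT guards against adding
 * the same conntrack entry to the flow table twice.
 */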
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp, bool bidirectional)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}
	if (bidirectional)
		__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

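/* Only stable connections are offloaded: established and assured TCP,
 * confirmed UDP (unidirectionally until assured) and assured GRE v0
 * without NAT; connections using helpers or sequence adjustment are
 * never offloaded.
 */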
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false, bidirectional = true;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;

		tcp = true;
		break;
	case IPPROTO_UDP:
		if (!nf_ct_is_confirmed(ct))
			return;
		if (!test_bit(IPS_ASSURED_BIT, &ct->status))
			bidirectional = false;
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE: {
		struct nf_conntrack_tuple *tuple;

		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->status & IPS_NAT_MASK)
			return;

		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		/* No support for GRE v1 */
		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
			return;
		break;
	}
#endif
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
}

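/* Parse an IPv4 packet into a flow table lookup tuple. Only TCP, UDP
 * and (if enabled) GRE v0 packets with a plain 20-byte IP header and a
 * TTL above 1 qualify.
 */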
static bool
|
|
|
|
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
|
2020-03-04 19:49:38 +08:00
|
|
|
struct flow_offload_tuple *tuple,
|
|
|
|
struct tcphdr **tcph)
|
2020-03-03 21:07:51 +08:00
|
|
|
{
|
|
|
|
struct flow_ports *ports;
|
|
|
|
unsigned int thoff;
|
|
|
|
struct iphdr *iph;
|
2022-02-25 09:53:08 +08:00
|
|
|
size_t hdrsize;
|
|
|
|
u8 ipproto;
|
2020-03-03 21:07:51 +08:00
|
|
|
|
2020-03-04 19:49:39 +08:00
|
|
|
if (!pskb_network_may_pull(skb, sizeof(*iph)))
|
2020-03-03 21:07:51 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
iph = ip_hdr(skb);
|
|
|
|
thoff = iph->ihl * 4;
|
|
|
|
|
|
|
|
if (ip_is_fragment(iph) ||
|
|
|
|
unlikely(thoff != sizeof(struct iphdr)))
|
|
|
|
return false;
|
|
|
|
|
2022-02-25 09:53:08 +08:00
|
|
|
ipproto = iph->protocol;
|
|
|
|
switch (ipproto) {
|
|
|
|
case IPPROTO_TCP:
|
|
|
|
hdrsize = sizeof(struct tcphdr);
|
|
|
|
break;
|
|
|
|
case IPPROTO_UDP:
|
|
|
|
hdrsize = sizeof(*ports);
|
|
|
|
break;
|
|
|
|
#ifdef CONFIG_NF_CT_PROTO_GRE
|
|
|
|
case IPPROTO_GRE:
|
|
|
|
hdrsize = sizeof(struct gre_base_hdr);
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
default:
|
2020-03-03 21:07:51 +08:00
|
|
|
return false;
|
2022-02-25 09:53:08 +08:00
|
|
|
}
|
2020-03-03 21:07:51 +08:00
|
|
|
|
|
|
|
if (iph->ttl <= 1)
|
|
|
|
return false;
|
|
|
|
|
2022-02-25 09:53:08 +08:00
|
|
|
if (!pskb_network_may_pull(skb, thoff + hdrsize))
|
2020-03-03 21:07:51 +08:00
|
|
|
return false;
|
|
|
|
|
2022-02-25 09:53:08 +08:00
|
|
|
switch (ipproto) {
|
|
|
|
case IPPROTO_TCP:
|
2020-03-04 19:49:38 +08:00
|
|
|
*tcph = (void *)(skb_network_header(skb) + thoff);
|
2022-02-25 09:53:08 +08:00
|
|
|
fallthrough;
|
|
|
|
case IPPROTO_UDP:
|
|
|
|
ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
|
|
|
|
tuple->src_port = ports->source;
|
|
|
|
tuple->dst_port = ports->dest;
|
|
|
|
break;
|
|
|
|
case IPPROTO_GRE: {
|
|
|
|
struct gre_base_hdr *greh;
|
|
|
|
|
|
|
|
greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
|
|
|
|
if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
|
|
|
|
return false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
iph = ip_hdr(skb);
|
2020-03-03 21:07:51 +08:00
|
|
|
|
|
|
|
tuple->src_v4.s_addr = iph->saddr;
|
|
|
|
tuple->dst_v4.s_addr = iph->daddr;
|
|
|
|
tuple->l3proto = AF_INET;
|
2022-02-25 09:53:08 +08:00
|
|
|
tuple->l4proto = ipproto;
|
2020-03-03 21:07:51 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
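
/* IPv6 counterpart of the tuple fill above. Extension headers are not
 * walked, so only packets whose next header is directly TCP, UDP or
 * GRE qualify, and hop_limit <= 1 is rejected.
 */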
static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	size_t hdrsize;
	u8 nexthdr;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);
	thoff = sizeof(*ip6h);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (ip6h->hop_limit <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (nexthdr) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	ip6h = ipv6_hdr(skb);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;

	return true;
}
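
/* Fast path of act_ct: look the packet up in the zone's flow table
 * and, on a hit, attach the cached conntrack entry to the skb so
 * nf_conntrack_in() can be skipped. Returns true when the packet was
 * handled from the flow table.
 */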
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	bool force_refresh = false;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (dir == FLOW_OFFLOAD_DIR_REPLY &&
	    !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
		/* Only offload reply direction after connection became
		 * assured.
		 */
		if (test_bit(IPS_ASSURED_BIT, &ct->status))
			set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
		else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
			/* If flow_table flow has already been updated to the
			 * established state, then don't refresh.
			 */
			return false;
		force_refresh = true;
	}

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
	else
		ctinfo = IP_CT_ESTABLISHED_REPLY;

	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	tcf_ct_flow_ct_ext_ifidx_update(flow);
	flow_offload_refresh(nf_ft, flow, force_refresh);
	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
		/* Process this flow in SW to allow promoting to ASSURED */
		return false;
	}

	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   struct tcf_ct_params *p)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		goto drop_ct;
	if (nf_ct_zone(ct)->id != p->zone)
		goto drop_ct;
	if (p->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != p->helper)
			goto drop_ct;
	}

	/* Force conntrack entry direction. */
	if ((p->ct_action & TCA_CT_ACT_FORCE) &&
	    CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		goto drop_ct;
	}

	return true;

drop_ct:
	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	return false;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;
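
	/* Use skb_protocol() with skip_vlan == true so the L3 ethertype is
	 * found even when the packet carries one or more VLAN tags (QinQ).
	 */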
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}
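
/* Queue IP fragments for reassembly before conntrack sees them.
 * *defrag tells the caller the skb may have been rebuilt, so the
 * cached packet length must be refreshed before it is sent on.
 */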
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u8 proto;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
	if (err)
		return err;

	*defrag = true;
	tc_skb_cb(skb)->mru = mru;

	return 0;
}

static void tcf_ct_params_free(struct tcf_ct_params *params)
{
	if (params->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (params->ct_action & TCA_CT_ACT_NAT)
			nf_nat_helper_put(params->helper);
#endif
		nf_conntrack_helper_put(params->helper);
	}
	if (params->ct_ft)
		tcf_ct_flow_table_put(params->ct_ft);
	if (params->tmpl) {
		if (params->put_labels)
			nf_connlabels_put(nf_ct_net(params->tmpl));

		nf_ct_put(params->tmpl);
	}

	kfree(params);
}

static void tcf_ct_params_free_rcu(struct rcu_head *head)
{
	struct tcf_ct_params *params;

	params = container_of(head, struct tcf_ct_params, rcu);
	tcf_ct_params_free(params);
}

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err, action = 0;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;
	if (ct_action & TCA_CT_ACT_NAT_SRC)
		action |= BIT(NF_NAT_MANIP_SRC);
	if (ct_action & TCA_CT_ACT_NAT_DST)
		action |= BIT(NF_NAT_MANIP_DST);

	err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);
	if (err != NF_ACCEPT)
		return err & NF_VERDICT_MASK;

	if (action & BIT(NF_NAT_MANIP_SRC))
		tc_skb_cb(skb)->post_ct_snat = 1;
	if (action & BIT(NF_NAT_MANIP_DST))
		tc_skb_cb(skb)->post_ct_dnat = 1;

	return err;
#else
	return NF_ACCEPT;
#endif
}
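
/* Main act_ct datapath: defragment, run the packet through the flow
 * table fast path or nf_conntrack_in(), apply NAT, mark and labels,
 * optionally commit the connection, and finally consider it for flow
 * table offload.
 */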
TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
				 struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	bool cached, commit, clear;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool add_helper = false;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
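		/* Clear post_ct as well, so that a subsequent
		 * 'ct_state -trk' match sees the packet as untracked again.
		 */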
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err)
		goto out_frag;

	err = nf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto nf_error;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto nf_error;

	if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
		err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
		if (err)
			goto drop;
		add_helper = true;
		if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) {
			if (!nfct_seqadj_ext_add(ct))
				goto drop;
		}
	}

	if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
		err = nf_ct_helper(skb, ct, ctinfo, family);
		if (err != NF_ACCEPT)
			goto nf_error;
	}

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(skb, ct, ctinfo);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		err = nf_conntrack_confirm(skb);
		if (err != NF_ACCEPT)
			goto nf_error;

		/* The ct may be dropped if a clash has been resolved,
		 * so it's necessary to retrieve it from skb again to
		 * prevent UAF.
		 */
		ct = nf_ct_get(skb, &ctinfo);
		if (!ct)
			skip_add = true;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;

out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

out_frag:
	if (err != -EINPROGRESS)
		tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_CONSUMED;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;

nf_error:
	/* some verdicts store extra data in upper bits, such
	 * as errno or queue number.
	 */
	switch (err & NF_VERDICT_MASK) {
	case NF_DROP:
		goto drop;
	case NF_STOLEN:
		tcf_action_inc_drop_qstats(&c->common);
		return TC_ACT_CONSUMED;
	default:
		DEBUG_NET_WARN_ON_ONCE(1);
		goto drop;
	}
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
	[TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN },
	[TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 },
	[TCA_CT_HELPER_PROTO] = { .type = NLA_U8 },
};
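
/* Parse the TCA_CT_NAT_* attributes into an nf_nat_range2. A missing
 * maximum address or port falls back to the corresponding minimum.
 */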
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
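
/* Parse netlink attributes into a fresh tcf_ct_params: action flags,
 * NAT range, mark, labels, zone, the conntrack template and an
 * optional helper. On failure the partially set up template is
 * released.
 */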
static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct nf_conntrack_zone zone;
	int err, family, proto, len;
	bool put_labels = false;
	struct nf_conn *tmpl;
	char *name;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (nf_connlabels_get(net, n_bits - 1)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		} else {
			put_labels = true;
		}

		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	p->tmpl = tmpl;
	if (tb[TCA_CT_HELPER_NAME]) {
		name = nla_data(tb[TCA_CT_HELPER_NAME]);
		len = nla_len(tb[TCA_CT_HELPER_NAME]);
		if (len > 16 || name[len - 1] != '\0') {
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name.");
			err = -EINVAL;
			goto err;
		}
		family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
		proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
		err = nf_ct_add_helper(tmpl, name, family, proto,
				       p->ct_action & TCA_CT_ACT_NAT, &p->helper);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add helper");
			goto err;
		}
	}

	p->put_labels = put_labels;
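
	/* Only mark the template confirmed when committing: a non-commit
	 * ct() lookup must not consume a conntrack expectation that a
	 * later commit pass still needs to match on.
	 */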
	if (p->ct_action & TCA_CT_ACT_COMMIT)
		__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	return 0;
err:
	if (put_labels)
		nf_connlabels_put(net);

	nf_ct_put(p->tmpl);
	p->tmpl = NULL;
	return err;
}
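
/* .init handler: parse and validate the attributes, create or update
 * the action instance, and swap the new parameter block in under RCU.
 */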
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return ACT_P_BOUND;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(net, params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		tcf_ct_params_free(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper)
{
	if (!helper)
		return 0;

	if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) ||
	    nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) ||
	    nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum))
		return -1;

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

	if (tcf_ct_dump_helper(skb, p->helper))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}
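
/* Translate the software ct action into a flow_action entry for
 * hardware offload; actions that use a conntrack helper cannot be
 * offloaded.
 */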
static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind,
				    struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (tcf_ct_helper(act))
			return -EOPNOTSUPP;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.stats_update	=	tcf_stats_update,
	.offload_act_setup =	tcf_ct_offload_act_setup,
	.size		=	sizeof(struct tcf_ct),
};
MODULE_ALIAS_NET_ACT("ct");

static __net_init int ct_init_net(struct net *net)
{
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ct_ops.net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &act_ct_ops.net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");