/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#ifndef DATAPATH_H
#define DATAPATH_H 1

#include <asm/page.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <net/ip_tunnels.h>

#include "conntrack.h"
#include "flow.h"
#include "flow_table.h"
#include "meter.h"
#include "vport-internal_dev.h"

#define DP_MAX_PORTS		USHRT_MAX
#define DP_VPORT_HASH_BUCKETS	1024
#define DP_MASKS_REBALANCE_INTERVAL	4000

/**
 * struct dp_stats_percpu - per-cpu packet processing statistics for a given
 * datapath.
 * @n_hit: Number of received packets for which a matching flow was found in
 * the flow table.
 * @n_missed: Number of received packets that had no matching flow in the flow
 * table.  The sum of @n_hit and @n_missed is the number of packets that have
 * been received by the datapath.
 * @n_lost: Number of received packets that had no matching flow in the flow
 * table that could not be sent to userspace (normally due to an overflow in
 * one of the datapath's queues).
 * @n_mask_hit: Number of masks looked up for flow match.
 *   @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked
 *   up per packet.
 * @n_cache_hit: The number of received packets that had their mask found using
 * the mask cache.
 */
struct dp_stats_percpu {
	u64 n_hit;
	u64 n_missed;
	u64 n_lost;
	u64 n_mask_hit;
	u64 n_cache_hit;
	struct u64_stats_sync syncp;
};
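
/*
 * Illustrative sketch only (not part of this header's API): a reader of
 * @stats_percpu sums the counters per CPU under the u64_stats_sync retry
 * loop, roughly as datapath.c does when filling its aggregate stats.
 * 'dp' and 'cpu' below are assumed variables.
 *
 *	const struct dp_stats_percpu *s = per_cpu_ptr(dp->stats_percpu, cpu);
 *	unsigned int start;
 *	u64 hit, missed;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&s->syncp);
 *		hit = s->n_hit;
 *		missed = s->n_missed;
 *	} while (u64_stats_fetch_retry(&s->syncp, start));
 */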

/**
 * struct dp_nlsk_pids - array of netlink portids for a datapath.
 *                       This is used when OVS_DP_F_DISPATCH_UPCALL_PER_CPU
 *                       is enabled and must be protected by rcu.
 * @rcu: RCU callback head for deferred destruction.
 * @n_pids: Size of @pids array.
 * @pids: Array storing the Netlink socket PIDs indexed by CPU ID for packets
 *       that miss the flow table.
 */
struct dp_nlsk_pids {
	struct rcu_head rcu;
	u32 n_pids;
	u32 pids[];
};
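
/*
 * Illustrative sketch only: with per-CPU dispatch enabled, the upcall portid
 * for the executing CPU is looked up from @pids under RCU; a portid of 0 means
 * the packet is dropped and counted as @n_lost.  The authoritative lookup,
 * including handling of a pids array smaller than the number of CPUs, is
 * ovs_dp_get_upcall_portid() in datapath.c; the variable names below are
 * hypothetical.
 *
 *	struct dp_nlsk_pids *ids;
 *	u32 portid = 0;
 *
 *	rcu_read_lock();
 *	ids = rcu_dereference(dp->upcall_portids);
 *	if (ids && cpu_id < ids->n_pids)
 *		portid = ids->pids[cpu_id];
 *	rcu_read_unlock();
 */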

/**
 * struct datapath - datapath for flow-based packet switching
 * @rcu: RCU callback head for deferred destruction.
 * @list_node: Element in global 'dps' list.
 * @table: flow table.
 * @ports: Hash table for ports.  %OVSP_LOCAL port always exists.  Protected by
 * ovs_mutex and RCU.
 * @stats_percpu: Per-CPU datapath statistics.
 * @net: Reference to net namespace.
 * @max_headroom: the maximum headroom of all vports in this datapath; it will
 * be used by all the internal vports in this dp.
 * @upcall_portids: RCU protected 'struct dp_nlsk_pids'.
 *
 * Context: See the comment on locking at the top of datapath.c for additional
 * locking information.
 */
struct datapath {
	struct rcu_head rcu;
	struct list_head list_node;

	/* Flow table. */
	struct flow_table table;

	/* Switch ports. */
	struct hlist_head *ports;

	/* Stats. */
	struct dp_stats_percpu __percpu *stats_percpu;

	/* Network namespace ref. */
	possible_net_t net;

	u32 user_features;

	u32 max_headroom;

	/* Switch meters. */
	struct dp_meter_table meter_tbl;

	struct dp_nlsk_pids __rcu *upcall_portids;
};
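
/*
 * Illustrative sketch only (an assumption mirroring vport_hash_bucket() in
 * datapath.c): @ports has DP_VPORT_HASH_BUCKETS buckets, so a port number
 * selects its bucket by masking with the power-of-two bucket count.
 *
 *	struct hlist_head *head = &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
 */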

/**
 * struct ovs_skb_cb - OVS data in skb CB
 * @input_vport: The original vport packet came in on. This value is cached
 * when a packet is received by OVS.
 * @mru: The maximum received fragment size; 0 if the packet is not
 * fragmented.
 * @acts_origlen: The netlink size of the flow actions applied to this skb.
 * @cutlen: The number of bytes from the packet end to be removed.
 * @probability: The sampling probability that was applied to this skb; 0 means
 * no sampling has occurred; U32_MAX means 100% probability.
 */
struct ovs_skb_cb {
	struct vport		*input_vport;
	u16			mru;
	u16			acts_origlen;
	u32			cutlen;
	u32			probability;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
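
/*
 * Usage sketch: OVS_CB() overlays struct ovs_skb_cb on skb->cb, so per-packet
 * OVS state is read and written in place at hypothetical call sites such as:
 *
 *	OVS_CB(skb)->input_vport = vport;
 *	OVS_CB(skb)->mru = 0;
 *	len = skb->len - OVS_CB(skb)->cutlen;
 */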

/**
 * struct dp_upcall_info - metadata to include with a packet sent to userspace
 * @cmd: One of %OVS_PACKET_CMD_*.
 * @userdata: If nonnull, its variable-length value is passed to userspace as
 * %OVS_PACKET_ATTR_USERDATA.
 * @portid: Netlink portid to which packet should be sent.  If @portid is 0
 * then no packet is sent and the packet is accounted in the datapath's @n_lost
 * counter.
 * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
 * @mru: If not zero, maximum received IP fragment size.
 */
struct dp_upcall_info {
	struct ip_tunnel_info *egress_tun_info;
	const struct nlattr *userdata;
	const struct nlattr *actions;
	int actions_len;
	u32 portid;
	u8 cmd;
	u16 mru;
};
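
/*
 * Illustrative sketch only (loosely after the flow-miss path in
 * ovs_dp_process_packet()): callers zero the structure, fill what they need
 * and pass it to ovs_dp_upcall(); unset fields simply omit the corresponding
 * netlink attributes.  'p', 'dp' and 'key' are assumed variables.
 *
 *	struct dp_upcall_info upcall;
 *
 *	memset(&upcall, 0, sizeof(upcall));
 *	upcall.cmd = OVS_PACKET_CMD_MISS;
 *	upcall.portid = ovs_vport_find_upcall_portid(p, skb);
 *	upcall.mru = OVS_CB(skb)->mru;
 *	error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
 */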

/**
 * struct ovs_net - Per net-namespace data for ovs.
 * @dps: List of datapaths to enable dumping them all out.
 * Protected by genl_mutex.
 */
struct ovs_net {
	struct list_head dps;
	struct work_struct dp_notify_work;
	struct delayed_work masks_rebalance;
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	struct ovs_ct_limit_info *ct_limit_info;
#endif
};

/**
 * enum ovs_pkt_hash_types - hash info to include with a packet
 * to send to userspace.
 * @OVS_PACKET_HASH_SW_BIT: indicates hash was computed in software stack.
 * @OVS_PACKET_HASH_L4_BIT: indicates hash is a canonical 4-tuple hash
 * over transport ports.
 */
enum ovs_pkt_hash_types {
	OVS_PACKET_HASH_SW_BIT = (1ULL << 32),
	OVS_PACKET_HASH_L4_BIT = (1ULL << 33),
};
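
/*
 * Illustrative sketch only (an assumption about how queue_userspace_packet()
 * in datapath.c reports OVS_PACKET_ATTR_HASH): the 32-bit skb hash is widened
 * to 64 bits and the enum bits above annotate how it was obtained.
 *
 *	u64 hash = skb_get_hash_raw(skb);
 *
 *	if (skb->sw_hash)
 *		hash |= OVS_PACKET_HASH_SW_BIT;
 *	if (skb->l4_hash)
 *		hash |= OVS_PACKET_HASH_L4_BIT;
 */
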
extern unsigned int ovs_net_id;
void ovs_lock(void);
void ovs_unlock(void);

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void);
#else
#define lockdep_ovsl_is_held()	1
#endif

#define ASSERT_OVSL()		WARN_ON(!lockdep_ovsl_is_held())
#define ovsl_dereference(p)					\
	rcu_dereference_protected(p, lockdep_ovsl_is_held())
#define rcu_dereference_ovsl(p)					\
	rcu_dereference_check(p, lockdep_ovsl_is_held())
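
/*
 * Usage sketch (hypothetical call sites): ovsl_dereference() documents that
 * the caller holds ovs_mutex, while rcu_dereference_ovsl() also accepts a
 * plain RCU read-side critical section.
 *
 *	ids = ovsl_dereference(dp->upcall_portids);       holding ovs_lock()
 *	ids = rcu_dereference_ovsl(dp->upcall_portids);   RCU reader or ovs_lock()
 */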

static inline struct net *ovs_dp_get_net(const struct datapath *dp)
{
	return read_pnet(&dp->net);
}

static inline void ovs_dp_set_net(struct datapath *dp, struct net *net)
{
	write_pnet(&dp->net, net);
}

struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no);

static inline struct vport *ovs_vport_rcu(const struct datapath *dp, int port_no)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return ovs_lookup_vport(dp, port_no);
}

static inline struct vport *ovs_vport_ovsl_rcu(const struct datapath *dp, int port_no)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	return ovs_lookup_vport(dp, port_no);
}

static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_no)
{
	ASSERT_OVSL();
	return ovs_lookup_vport(dp, port_no);
}
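
/*
 * Usage sketch (hypothetical call sites): all three wrappers resolve to
 * ovs_lookup_vport(); they differ only in the locking context they assert.
 *
 *	vport = ovs_vport_rcu(dp, port_no);       inside rcu_read_lock()
 *	vport = ovs_vport_ovsl(dp, port_no);      ovs_mutex held
 *	vport = ovs_vport_ovsl_rcu(dp, port_no);  either of the above
 */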

/* Must be called with rcu_read_lock. */
static inline struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);

		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}

extern struct notifier_block ovs_dp_device_notifier;
extern struct genl_family dp_vport_genl_family;

void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
void ovs_dp_detach_port(struct vport *);
int ovs_dp_upcall(struct datapath *, struct sk_buff *,
		  const struct sw_flow_key *, const struct dp_upcall_info *,
		  uint32_t cutlen);

u32 ovs_dp_get_upcall_portid(const struct datapath *dp, uint32_t cpu_id);

const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
					 u32 portid, u32 seq, u8 cmd);

int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *, struct sw_flow_key *);

void ovs_dp_notify_wq(struct work_struct *work);

int action_fifos_init(void);
void action_fifos_exit(void);

/* 'KEY' must not have any bits set outside of the 'MASK' */
#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define OVS_SET_MASKED(OLD, KEY, MASK) ((OLD) = OVS_MASKED(OLD, KEY, MASK))
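
/*
 * Worked example: with OLD = 0xab, KEY = 0x04 and MASK = 0x0f,
 * OVS_MASKED(OLD, KEY, MASK) = 0x04 | (0xab & ~0x0f) = 0xa4, i.e. only the
 * masked bits are replaced; this is why KEY must not carry bits outside MASK.
 */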

#define OVS_NLERR(logging_allowed, fmt, ...)			\
do {								\
	if (logging_allowed && net_ratelimit())			\
		pr_info("netlink: " fmt "\n", ##__VA_ARGS__);	\
} while (0)
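
/*
 * Usage sketch (hypothetical message): the first argument gates logging, e.g.
 * a 'log' flag threaded through the flow netlink parsers; the format string is
 * emitted rate-limited with a "netlink: " prefix.
 *
 *	OVS_NLERR(log, "Unknown key attribute (type=%d, max=%d)", type, max);
 */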

#endif /* datapath.h */