net: allow gso_max_size to exceed 65536
The code for gso_max_size was originally added to allow debugging and working around buggy devices that couldn't support TSO with blocks 64K in size. The original reason for limiting it to 64K was that this was the existing limit of the IPv4 and non-jumbogram IPv6 length fields.

With the addition of Big TCP we can remove this limit and allow the value to potentially go up to UINT_MAX, limited instead by the tso_max_size value.

To support this we need to go through and clean up the remaining users of the gso_max_size value so that the values cap at 64K for non-TCPv6 flows. In addition we can clean up the GSO_MAX_SIZE value so that 64K becomes GSO_LEGACY_MAX_SIZE and UINT_MAX becomes the upper limit for GSO_MAX_SIZE.

v6: (edumazet) fixed a compile error if CONFIG_IPV6=n, in a new sk_trim_gso_size() helper. netif_set_tso_max_size() caps the requested TSO size with GSO_MAX_SIZE.

Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7c4e983c4f
parent 89527be8d8
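To make the driver-facing behavior concrete, here is a minimal hypothetical sketch (not part of this commit) of how a BIG TCP capable driver would advertise a TSO limit above 64K after this change; my_driver_setup and my_hw_max_tso are illustrative names:

#include <linux/netdevice.h>

/* Hypothetical driver hook, for illustration only. */
static void my_driver_setup(struct net_device *dev, unsigned int my_hw_max_tso)
{
	/* netif_set_tso_max_size() now caps the request at GSO_MAX_SIZE
	 * (UINT_MAX) instead of the old 64K ceiling. Frames larger than
	 * GSO_LEGACY_MAX_SIZE only reach the device for IPv6 BIG TCP
	 * flows; sk_trim_gso_size() trims every other socket back to
	 * the legacy 64K limit.
	 */
	netif_set_tso_max_size(dev, my_hw_max_tso);
}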
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -151,7 +151,8 @@
 #define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))
 
 /* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XGBE_TX_MAX_SPLIT	((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+#define XGBE_TX_MAX_SPLIT	\
+	((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
 
 /* Maximum possible descriptors needed for an SKB:
  * - Maximum number of SKB frags
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -2038,7 +2038,7 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
 {
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 
-	return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
+	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_MAX_SIZE;
 }
 
 static void
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1008,7 +1008,8 @@ static int ef100_process_design_param(struct efx_nic *efx,
 		}
 		return 0;
 	case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
-		nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
+		nic_data->tso_max_payload_len = min_t(u64, reader->value,
+						      GSO_LEGACY_MAX_SIZE);
 		netif_set_tso_max_size(efx->net_dev,
 				       nic_data->tso_max_payload_len);
 		return 0;
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -98,7 +98,8 @@ unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
 	/* Possibly more for PCIe page boundaries within input fragments */
 	if (PAGE_SIZE > EF4_PAGE_SIZE)
 		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
-				   DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
+				   DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+						EF4_PAGE_SIZE));
 
 	return max_descs;
 }
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -416,7 +416,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
 	/* Possibly more for PCIe page boundaries within input fragments */
 	if (PAGE_SIZE > EFX_PAGE_SIZE)
 		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
-				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+				   DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+						EFX_PAGE_SIZE));
 
 	return max_descs;
 }
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -38,7 +38,8 @@
 #define XLGMAC_RX_DESC_MAX_DIRTY	(XLGMAC_RX_DESC_CNT >> 3)
 
 /* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XLGMAC_TX_MAX_SPLIT	((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+#define XLGMAC_TX_MAX_SPLIT	\
+	((GSO_LEGACY_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
 
 /* Maximum possible descriptors needed for a SKB */
 #define XLGMAC_TX_MAX_DESC_NR	(MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1349,7 +1349,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct ndis_offload hwcaps;
 	struct ndis_offload_params offloads;
-	unsigned int gso_max_size = GSO_MAX_SIZE;
+	unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
 	int ret;
 
 	/* Find HW offload capabilities */
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -667,7 +667,7 @@ static void fcoe_netdev_features_change(struct fc_lport *lport,
 
 	if (netdev->features & NETIF_F_FSO) {
 		lport->seq_offload = 1;
-		lport->lso_max = netdev->gso_max_size;
+		lport->lso_max = min(netdev->gso_max_size, GSO_LEGACY_MAX_SIZE);
 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
 				lport->lso_max);
 	} else {
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2272,7 +2272,9 @@ struct net_device {
 	const struct rtnl_link_ops *rtnl_link_ops;
 
 	/* for setting kernel sock attribute on TCP connection setup */
-#define GSO_MAX_SIZE		65536
+#define GSO_LEGACY_MAX_SIZE	65536u
+#define GSO_MAX_SIZE		UINT_MAX
+
 	unsigned int		gso_max_size;
 #define TSO_LEGACY_MAX_SIZE	65536
 #define TSO_MAX_SIZE		UINT_MAX
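As a quick reading aid for the constants above, a sketch (not code from the patch) of the relationships they establish, expressed as hypothetical compile-time checks:

#include <linux/build_bug.h>
#include <linux/limits.h>
#include <linux/netdevice.h>

/* Illustrative only: the legacy 64K value stays the default and the cap
 * for non BIG TCP flows, while GSO_MAX_SIZE becomes a pure upper bound.
 */
static_assert(GSO_LEGACY_MAX_SIZE == 65536u);	/* old wire-format limit */
static_assert(GSO_MAX_SIZE == UINT_MAX);	/* new upper bound */
static_assert(GSO_LEGACY_MAX_SIZE <= GSO_MAX_SIZE);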
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -1001,7 +1001,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 		cb->pkt_len = skb->len;
 	} else {
 		if (__skb->wire_len < skb->len ||
-		    __skb->wire_len > GSO_MAX_SIZE)
+		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
 			return -EINVAL;
 		cb->pkt_len = __skb->wire_len;
 	}
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2998,11 +2998,12 @@ EXPORT_SYMBOL(netif_set_real_num_queues);
  * @size:	max skb->len of a TSO frame
  *
  * Set the limit on the size of TSO super-frames the device can handle.
- * Unless explicitly set the stack will assume the value of %GSO_MAX_SIZE.
+ * Unless explicitly set the stack will assume the value of
+ * %GSO_LEGACY_MAX_SIZE.
  */
 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
 {
-	dev->tso_max_size = size;
+	dev->tso_max_size = min(GSO_MAX_SIZE, size);
 	if (size < READ_ONCE(dev->gso_max_size))
 		netif_set_gso_max_size(dev, size);
 }
@@ -10595,7 +10596,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
-	dev->gso_max_size = GSO_MAX_SIZE;
+	dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
 	dev->gso_max_segs = GSO_MAX_SEGS;
 	dev->gro_max_size = GRO_MAX_SIZE;
 	dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2817,7 +2817,7 @@ static int do_setlink(const struct sk_buff *skb,
 	if (tb[IFLA_GSO_MAX_SIZE]) {
 		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
 
-		if (max_size > GSO_MAX_SIZE || max_size > dev->tso_max_size) {
+		if (max_size > dev->tso_max_size) {
 			err = -EINVAL;
 			goto errout;
 		}
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2293,6 +2293,19 @@ void sk_free_unlock_clone(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
 
+static void sk_trim_gso_size(struct sock *sk)
+{
+	if (sk->sk_gso_max_size <= GSO_LEGACY_MAX_SIZE)
+		return;
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6 &&
+	    sk_is_tcp(sk) &&
+	    !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+		return;
+#endif
+	sk->sk_gso_max_size = GSO_LEGACY_MAX_SIZE;
+}
+
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
 	u32 max_segs = 1;
@@ -2312,6 +2325,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
 			sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
+			sk_trim_gso_size(sk);
 			sk->sk_gso_max_size -= (MAX_TCP_HEADER + 1);
 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
 			max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -310,7 +310,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
 	 */
 	bytes = min_t(unsigned long,
 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
-		      GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
+		      GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
 	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
 
 	return min(segs, 0x7FU);
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1553,7 +1553,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	 * SO_SNDBUF values.
 	 * Also allow first and last skb in retransmit queue to be split.
 	 */
-	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
 		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
 		     skb != tcp_rtx_queue_head(sk) &&
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -134,7 +134,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 		dst_hold(tp->dst);
 		sk_setup_caps(sk, tp->dst);
 	}
-	packet->max_size = sk_can_gso(sk) ? READ_ONCE(tp->dst->dev->gso_max_size)
+	packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size),
+						GSO_LEGACY_MAX_SIZE)
 					  : asoc->pathmtu;
 	rcu_read_unlock();
 }