Merge tag 'net-6.5-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from ipsec and netfilter.

  No known outstanding regressions.

  Fixes to fixes:

   - virtio-net: set queues after driver_ok, avoid a potential race
     added by recent fix

   - Revert "vlan: Fix VLAN 0 memory leak", it may lead to a warning
     when VLAN 0 is registered explicitly

   - nf_tables:
      - fix false-positive lockdep splat in recent fixes
      - don't fail inserts if duplicate has expired (fix test failures)
      - fix races between garbage collection and netns dismantle

  Current release - new code bugs:

   - mlx5: Fix mlx5_cmd_update_root_ft() error flow

  Previous releases - regressions:

   - phy: fix IRQ-based wake-on-lan over hibernate / power off

  Previous releases - always broken:

   - sock: fix misuse of sk_under_memory_pressure() preventing the
     system from exiting global TCP memory pressure if a single cgroup
     is under pressure

   - fix the RTO timer retransmitting skb every 1ms if linear option is
     enabled

   - af_key: fix sadb_x_filter validation, amend netlink policy

   - ipsec: fix slab-use-after-free in decode_session6()

   - macb: in ZynqMP resume always configure PS GTR for non-wakeup
     source

  Misc:

   - netfilter: set default timeout to 3 secs for sctp shutdown send
     and recv state (from 300ms), align with protocol timers"

* tag 'net-6.5-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (49 commits)
  ice: Block switchdev mode when ADQ is active and vice versa
  qede: fix firmware halt over suspend and resume
  net: do not allow gso_size to be set to GSO_BY_FRAGS
  sock: Fix misuse of sk_under_memory_pressure()
  sfc: don't fail probe if MAE/TC setup fails
  sfc: don't unregister flow_indr if it was never registered
  net: dsa: mv88e6xxx: Wait for EEPROM done before HW reset
  net/mlx5: Fix mlx5_cmd_update_root_ft() error flow
  net/mlx5e: XDP, Fix fifo overrun on XDP_REDIRECT
  i40e: fix misleading debug logs
  iavf: fix FDIR rule fields masks validation
  ipv6: fix indentation of a config attribute
  mailmap: add entries for Simon Horman
  broadcom: b44: Use b44_writephy() return value
  net: openvswitch: reject negative ifindex
  team: Fix incorrect deletion of ETH_P_8021AD protocol vid from slaves
  net: phy: broadcom: stub c45 read/write for 54810
  netfilter: nft_dynset: disallow object maps
  netfilter: nf_tables: GC transaction race with netns dismantle
  netfilter: nf_tables: fix GC transaction races with netns and netlink event exit path
  ...
commit 0e8860d212

.mailmap
@@ -538,6 +538,8 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
 Sibi Sankar <quic_sibis@quicinc.com> <sibis@codeaurora.org>
 Sid Manning <quic_sidneym@quicinc.com> <sidneym@codeaurora.org>
 Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
+Simon Horman <horms@kernel.org> <simon.horman@corigine.com>
+Simon Horman <horms@kernel.org> <simon.horman@netronome.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
 Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
@@ -178,10 +178,10 @@ nf_conntrack_sctp_timeout_established - INTEGER (seconds)
 	Default is set to (hb_interval * path_max_retrans + rto_max)
 
 nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
-	default 0.3
+	default 3
 
 nf_conntrack_sctp_timeout_shutdown_recd - INTEGER (seconds)
-	default 0.3
+	default 3
 
 nf_conntrack_sctp_timeout_shutdown_ack_sent - INTEGER (seconds)
 	default 3
@@ -3034,6 +3034,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
 
 	/* If there is a GPIO connected to the reset pin, toggle it */
 	if (gpiod) {
+		/* If the switch has just been reset and not yet completed
+		 * loading EEPROM, the reset may interrupt the I2C transaction
+		 * mid-byte, causing the first EEPROM read after the reset
+		 * from the wrong location resulting in the switch booting
+		 * to wrong mode and inoperable.
+		 */
+		mv88e6xxx_g1_wait_eeprom_done(chip);
+
 		gpiod_set_value_cansleep(gpiod, 1);
 		usleep_range(10000, 20000);
 		gpiod_set_value_cansleep(gpiod, 0);
@@ -1793,11 +1793,9 @@ static int b44_nway_reset(struct net_device *dev)
 	b44_readphy(bp, MII_BMCR, &bmcr);
 	b44_readphy(bp, MII_BMCR, &bmcr);
-	r = -EINVAL;
-	if (bmcr & BMCR_ANENABLE) {
-		b44_writephy(bp, MII_BMCR,
-			     bmcr | BMCR_ANRESTART);
-		r = 0;
-	}
+	if (bmcr & BMCR_ANENABLE)
+		r = b44_writephy(bp, MII_BMCR,
+				 bmcr | BMCR_ANRESTART);
 	spin_unlock_irq(&bp->lock);
 
 	return r;
@@ -5194,6 +5194,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
 	unsigned int q;
 	int err;
 
+	if (!device_may_wakeup(&bp->dev->dev))
+		phy_exit(bp->sgmii_phy);
+
 	if (!netif_running(netdev))
 		return 0;
 
@@ -5254,7 +5257,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
 	if (!(bp->wol & MACB_WOL_ENABLED)) {
 		rtnl_lock();
 		phylink_stop(bp->phylink);
-		phy_exit(bp->sgmii_phy);
 		rtnl_unlock();
 		spin_lock_irqsave(&bp->lock, flags);
 		macb_reset_hw(bp);
@@ -5284,6 +5286,9 @@ static int __maybe_unused macb_resume(struct device *dev)
 	unsigned int q;
 	int err;
 
+	if (!device_may_wakeup(&bp->dev->dev))
+		phy_init(bp->sgmii_phy);
+
 	if (!netif_running(netdev))
 		return 0;
 
@@ -5344,8 +5349,6 @@ static int __maybe_unused macb_resume(struct device *dev)
 	macb_set_rx_mode(netdev);
 	macb_restore_features(bp);
 	rtnl_lock();
-	if (!device_may_wakeup(&bp->dev->dev))
-		phy_init(bp->sgmii_phy);
 
 	phylink_start(bp->phylink);
 	rtnl_unlock();
 
@@ -210,11 +210,11 @@ read_nvm_exit:
  * @hw: pointer to the HW structure.
  * @module_pointer: module pointer location in words from the NVM beginning
  * @offset: offset in words from module start
- * @words: number of words to write
- * @data: buffer with words to write to the Shadow RAM
+ * @words: number of words to read
+ * @data: buffer with words to read to the Shadow RAM
  * @last_command: tells the AdminQ that this is the last command
  *
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * Reads a 16 bit words buffer to the Shadow RAM using the admin command.
  **/
 static int i40e_read_nvm_aq(struct i40e_hw *hw,
 			    u8 module_pointer, u32 offset,
@@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
 	 */
 	if ((offset + words) > hw->nvm.sr_size)
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
 			   (offset + words), hw->nvm.sr_size);
 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
-		/* We can write only up to 4KB (one sector), in one AQ write */
+		/* We can read only up to 4KB (one sector), in one AQ write */
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write fail error: tried to write %d words, limit is %d.\n",
+			   "NVM read fail error: tried to read %d words, limit is %d.\n",
 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
-		/* A single write cannot spread over two sectors */
+		/* A single read cannot spread over two sectors */
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
 			   offset, words);
 	else
 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
@@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
 		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+		fltr->ip_ver = 4;
 		break;
 	case AH_V4_FLOW:
 	case ESP_V4_FLOW:
@@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
 		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
 		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+		fltr->ip_ver = 4;
 		break;
 	case IPV4_USER_FLOW:
 		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
@@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
 		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
 		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+		fltr->ip_ver = 4;
 		break;
 	case TCP_V6_FLOW:
 	case UDP_V6_FLOW:
@@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
 		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+		fltr->ip_ver = 6;
 		break;
 	case AH_V6_FLOW:
 	case ESP_V6_FLOW:
@@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		       sizeof(struct in6_addr));
 		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
 		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+		fltr->ip_ver = 6;
 		break;
 	case IPV6_USER_FLOW:
 		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
@@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
 		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
 		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+		fltr->ip_ver = 6;
 		break;
 	case ETHER_FLOW:
 		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
@@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		return -EINVAL;
 	}
 
+	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
+	if (err)
+		return err;
+
 	if (iavf_fdir_is_dup_fltr(adapter, fltr))
 		return -EEXIST;
 
@@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = {
 	}
 };
 
+static const struct in6_addr ipv6_addr_zero_mask = {
+	.in6_u = {
+		.u6_addr8 = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		}
+	}
+};
+
+/**
+ * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns 0 if all masks of packet fields are either full or empty. Returns
+ * error on at least one partial mask.
+ */
+int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
+				  struct iavf_fdir_fltr *fltr)
+{
+	if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_ver == 4) {
+		if (fltr->ip_mask.v4_addrs.src_ip &&
+		    fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
+			goto partial_mask;
+
+		if (fltr->ip_mask.v4_addrs.dst_ip &&
+		    fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
+			goto partial_mask;
+
+		if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
+			goto partial_mask;
+	} else if (fltr->ip_ver == 6) {
+		if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
+			   sizeof(struct in6_addr)) &&
+		    memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
+			   sizeof(struct in6_addr)))
+			goto partial_mask;
+
+		if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
+			   sizeof(struct in6_addr)) &&
+		    memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
+			   sizeof(struct in6_addr)))
+			goto partial_mask;
+
+		if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
+			goto partial_mask;
+	}
+
+	if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
+		goto partial_mask;
+
+	if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_mask.l4_header &&
+	    fltr->ip_mask.l4_header != htonl(U32_MAX))
+		goto partial_mask;
+
+	return 0;
+
+partial_mask:
+	dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
+	return -EOPNOTSUPP;
+}
+
 /**
  * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
  * @fltr: Flow Director filter data structure
@@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
 		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 	}
 
-	fltr->ip_ver = 4;
-
 	return 0;
 }
 
@@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
 		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
 	}
 
-	fltr->ip_ver = 6;
-
 	return 0;
 }
 
@@ -110,6 +110,8 @@ struct iavf_fdir_fltr {
 	struct virtchnl_fdir_add vc_add_msg;
 };
 
+int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
+				  struct iavf_fdir_fltr *fltr);
 int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
 void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
 bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
@@ -538,6 +538,12 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
 		break;
 	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
 	{
+		if (ice_is_adq_active(pf)) {
+			dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
+			NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
+			return -EOPNOTSUPP;
+		}
+
 		dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
 			 pf->hw.pf_id);
 		NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
@@ -8823,6 +8823,11 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 						  ice_setup_tc_block_cb,
 						  np, np, true);
 	case TC_SETUP_QDISC_MQPRIO:
+		if (ice_is_eswitch_mode_switchdev(pf)) {
+			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
+			return -EOPNOTSUPP;
+		}
+
 		if (pf->adev) {
 			mutex_lock(&pf->adev_mutex);
 			device_lock(&pf->adev->dev);
@@ -55,7 +55,7 @@ static int octep_send_mbox_req(struct octep_device *oct,
 	list_add_tail(&d->list, &oct->ctrl_req_wait_list);
 	ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q,
 					       (d->done != 0),
-					       jiffies + msecs_to_jiffies(500));
+					       msecs_to_jiffies(500));
 	list_del(&d->list);
 	if (ret == 0 || ret == 1)
 		return -EAGAIN;
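The timeout fix above matters because wait_event_interruptible_timeout() expects a relative duration in jiffies and computes the deadline itself; passing jiffies + msecs_to_jiffies(500) hands it an enormous relative value rather than a 500 ms limit. A minimal sketch of the corrected convention, using a hypothetical wait queue and flag rather than the octep driver's real context:

	#include <linux/wait.h>
	#include <linux/jiffies.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
	static int demo_done;

	static long demo_wait_for_reply(void)
	{
		/* Pass only the duration; the macro adds it to the current
		 * time internally. Returns 0 if the timeout elapsed with the
		 * condition still false, >= 1 if the condition became true,
		 * and -ERESTARTSYS if interrupted by a signal.
		 */
		return wait_event_interruptible_timeout(demo_wq, demo_done != 0,
							msecs_to_jiffies(500));
	}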
@@ -1038,6 +1038,10 @@ static void octep_device_cleanup(struct octep_device *oct)
 {
 	int i;
 
+	oct->poll_non_ioq_intr = false;
+	cancel_delayed_work_sync(&oct->intr_poll_task);
+	cancel_work_sync(&oct->ctrl_mbox_task);
+
 	dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
 
 	for (i = 0; i < OCTEP_MAX_VF; i++) {
@@ -1200,14 +1204,11 @@ static void octep_remove(struct pci_dev *pdev)
 	if (!oct)
 		return;
 
-	cancel_work_sync(&oct->tx_timeout_task);
-	cancel_work_sync(&oct->ctrl_mbox_task);
 	netdev = oct->netdev;
 	if (netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdev(netdev);
 
-	oct->poll_non_ioq_intr = false;
-	cancel_delayed_work_sync(&oct->intr_poll_task);
+	cancel_work_sync(&oct->tx_timeout_task);
 	octep_device_cleanup(oct);
 	pci_release_mem_regions(pdev);
 	free_netdev(netdev);
@@ -84,6 +84,8 @@ enum mlx5e_xdp_xmit_mode {
  * MLX5E_XDP_XMIT_MODE_XSK:
  *    none.
  */
+#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4
+
 union mlx5e_xdp_info {
 	enum mlx5e_xdp_xmit_mode mode;
 	union {
@@ -1298,11 +1298,13 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
 {
 	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-	int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound for maximum num of
-							   * entries of all xmit_modes.
-							   */
+	int entries;
 	size_t size;
 
+	/* upper bound for maximum num of entries of all xmit_modes. */
+	entries = roundup_pow_of_two(wq_sz * MLX5_SEND_WQEBB_NUM_DS *
+				     MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO);
+
 	size = array_size(sizeof(*xdpi_fifo->xi), entries);
 	xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
 	if (!xdpi_fifo->xi)
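The overrun fix works by raising the bound on fifo entries and rounding it to a power of two, since the fifo is indexed with free-running counters masked by (entries - 1), which is only correct for power-of-two sizes. A generic sketch of that sizing pattern, with illustrative names rather than the mlx5 structures:

	#include <linux/log2.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_fifo {
		void **xi;
		u32 mask;	/* entries - 1; valid because entries is a power of two */
		u32 pc, cc;	/* free-running producer/consumer counters */
	};

	static int demo_fifo_alloc(struct demo_fifo *f, u32 min_entries, int node)
	{
		u32 entries = roundup_pow_of_two(min_entries);

		f->xi = kvzalloc_node(array_size(sizeof(*f->xi), entries),
				      GFP_KERNEL, node);
		if (!f->xi)
			return -ENOMEM;
		f->mask = entries - 1;
		f->pc = f->cc = 0;
		return 0;
	}

	static void demo_fifo_push(struct demo_fifo *f, void *item)
	{
		/* Counters wrap naturally; the masking stays correct only for
		 * power-of-two sizes, which is why the rounding matters.
		 */
		f->xi[f->pc++ & f->mask] = item;
	}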
@@ -245,12 +245,20 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
 	    mlx5_lag_is_shared_fdb(dev) &&
 	    mlx5_lag_is_master(dev)) {
 		struct mlx5_core_dev *peer_dev;
-		int i;
+		int i, j;
 
 		mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
 			err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
 							  (!disconnect) ? ft->id : 0);
 			if (err && !disconnect) {
+				mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
+					if (j < i)
+						mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1,
+									    ns->root_ft->id);
+					else
+						break;
+				}
+
 				MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
 				MLX5_SET(set_flow_table_root_in, in, table_id,
 					 ns->root_ft->id);
@@ -176,6 +176,15 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 }
 #endif
 
+static int __maybe_unused qede_suspend(struct device *dev)
+{
+	dev_info(dev, "Device does not support suspend operation\n");
+
+	return -EOPNOTSUPP;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
+
 static const struct pci_error_handlers qede_err_handler = {
 	.error_detected = qede_io_error_detected,
 };
@@ -190,6 +199,7 @@ static struct pci_driver qede_pci_driver = {
 	.sriov_configure = qede_sriov_configure,
 #endif
 	.err_handler = &qede_err_handler,
+	.driver.pm = &qede_pm_ops,
 };
 
 static struct qed_eth_cb_ops qede_ll_ops = {
@@ -1194,7 +1194,7 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
 			net_dev->features |= NETIF_F_HW_TC;
 			efx->fixed_features |= NETIF_F_HW_TC;
 		}
-	return rc;
+	return 0;
 }
 
 int ef100_probe_vf(struct efx_nic *efx)
@@ -1657,10 +1657,10 @@ int efx_init_tc(struct efx_nic *efx)
 	rc = efx_tc_configure_fallback_acts_reps(efx);
 	if (rc)
 		return rc;
-	efx->tc->up = true;
 	rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
 	if (rc)
 		return rc;
+	efx->tc->up = true;
 	return 0;
 }
@@ -313,15 +313,21 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
 
 	pdev = of_find_device_by_node(pcs_np);
 	of_node_put(pcs_np);
-	if (!pdev || !platform_get_drvdata(pdev))
+	if (!pdev || !platform_get_drvdata(pdev)) {
+		if (pdev)
+			put_device(&pdev->dev);
 		return ERR_PTR(-EPROBE_DEFER);
+	}
 
 	miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
-	if (!miic_port)
+	if (!miic_port) {
+		put_device(&pdev->dev);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	miic = platform_get_drvdata(pdev);
 	device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+	put_device(&pdev->dev);
 
 	miic_port->miic = miic;
 	miic_port->port = port - 1;
@@ -542,6 +542,17 @@ static int bcm54xx_resume(struct phy_device *phydev)
 	return bcm54xx_config_init(phydev);
 }
 
+static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+{
+	return -EOPNOTSUPP;
+}
+
+static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+			      u16 val)
+{
+	return -EOPNOTSUPP;
+}
+
 static int bcm54811_config_init(struct phy_device *phydev)
 {
 	int err, reg;
@@ -1103,6 +1114,8 @@ static struct phy_driver broadcom_drivers[] = {
 	.get_strings	= bcm_phy_get_strings,
 	.get_stats	= bcm54xx_get_stats,
 	.probe		= bcm54xx_phy_probe,
+	.read_mmd	= bcm54810_read_mmd,
+	.write_mmd	= bcm54810_write_mmd,
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= bcm5481_config_aneg,
 	.config_intr	= bcm_phy_config_intr,
@@ -3216,6 +3216,8 @@ static int phy_probe(struct device *dev)
 			goto out;
 	}
 
+	phy_disable_interrupts(phydev);
+
 	/* Start out supporting everything. Eventually,
 	 * a controller will attach, and may modify one
 	 * or both of these values
@@ -3333,16 +3335,6 @@ static int phy_remove(struct device *dev)
 	return 0;
 }
 
-static void phy_shutdown(struct device *dev)
-{
-	struct phy_device *phydev = to_phy_device(dev);
-
-	if (phydev->state == PHY_READY || !phydev->attached_dev)
-		return;
-
-	phy_disable_interrupts(phydev);
-}
-
 /**
  * phy_driver_register - register a phy_driver with the PHY layer
  * @new_driver: new phy_driver to register
@@ -3376,7 +3368,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
 	new_driver->mdiodrv.driver.bus = &mdio_bus_type;
 	new_driver->mdiodrv.driver.probe = phy_probe;
 	new_driver->mdiodrv.driver.remove = phy_remove;
-	new_driver->mdiodrv.driver.shutdown = phy_shutdown;
 	new_driver->mdiodrv.driver.owner = owner;
 	new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
 
@@ -2200,7 +2200,9 @@ static void team_setup(struct net_device *dev)
 
 	dev->hw_features = TEAM_VLAN_FEATURES |
 			   NETIF_F_HW_VLAN_CTAG_RX |
-			   NETIF_F_HW_VLAN_CTAG_FILTER;
+			   NETIF_F_HW_VLAN_CTAG_FILTER |
+			   NETIF_F_HW_VLAN_STAG_RX |
+			   NETIF_F_HW_VLAN_STAG_FILTER;
 
 	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
 	dev->features |= dev->hw_features;
|
||||
err_xdp_ring:
|
||||
for (i--; i >= start; i--)
|
||||
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
|
||||
i = end;
|
||||
err_page_pool:
|
||||
for (i = start; i < end; i++) {
|
||||
for (i--; i >= start; i--) {
|
||||
page_pool_destroy(priv->rq[i].page_pool);
|
||||
priv->rq[i].page_pool = NULL;
|
||||
}
|
||||
|
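The corrected error path is the standard reverse-unwind idiom: when initialization fails at index i, exactly the entries [start, i) were set up, so they are torn down by walking backwards, and the second label re-seeds i before unwinding the page pools. A self-contained sketch of the pattern with generic resources:

	#include <stdlib.h>

	/* Initialize res[start..end); on failure release only what was
	 * initialized, in reverse order, and report the error.
	 */
	static int init_range(void **res, int start, int end)
	{
		int i;

		for (i = start; i < end; i++) {
			res[i] = malloc(64);
			if (!res[i])
				goto err;
		}
		return 0;

	err:
		/* res[i] failed; unwind the half-open range [start, i). */
		for (i--; i >= start; i--) {
			free(res[i]);
			res[i] = NULL;
		}
		return -1;
	}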
@@ -4219,8 +4219,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	if (vi->has_rss || vi->has_rss_hash_report)
 		virtnet_init_default_rss(vi);
 
-	_virtnet_set_queues(vi, vi->curr_queue_pairs);
-
 	/* serialize netdev register + virtio_device_ready() with ndo_open() */
 	rtnl_lock();
 
@@ -4233,6 +4231,8 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
+	_virtnet_set_queues(vi, vi->curr_queue_pairs);
+
 	/* a random MAC address has been assigned, notify the device.
 	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
 	 * because many devices work fine without getting MAC explicitly
@@ -155,6 +155,10 @@ retry:
 	if (gso_type & SKB_GSO_UDP)
 		nh_off -= thlen;
 
+	/* Kernel has a special handling for GSO_BY_FRAGS. */
+	if (gso_size == GSO_BY_FRAGS)
+		return -EINVAL;
+
 	/* Too small packets are not really GSO ones. */
 	if (skb->len - nh_off > gso_size) {
 		shinfo->gso_size = gso_size;
@@ -534,6 +534,7 @@ struct nft_set_elem_expr {
  *	@expr: stateful expression
  *	@ops: set ops
  *	@flags: set flags
+ *	@dead: set will be freed, never cleared
  *	@genmask: generation mask
  *	@klen: key length
  *	@dlen: data length
@@ -1420,6 +1420,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
 	return sk->sk_prot->memory_pressure != NULL;
 }
 
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+	return sk->sk_prot->memory_pressure &&
+		!!*sk->sk_prot->memory_pressure;
+}
+
 static inline bool sk_under_memory_pressure(const struct sock *sk)
 {
 	if (!sk->sk_prot->memory_pressure)
@@ -1984,6 +1984,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
 		if (dev->xfrmdev_ops->xdo_dev_state_free)
 			dev->xfrmdev_ops->xdo_dev_state_free(x);
 		xso->dev = NULL;
+		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
 		netdev_put(dev, &xso->dev_tracker);
 	}
 }
@@ -384,7 +384,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 				 dev->name);
 			vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
 		}
-		if (event == NETDEV_DOWN)
+		if (event == NETDEV_DOWN &&
+		    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
 			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
 
 		vlan_info = rtnl_dereference(dev->vlan_info);
@@ -3159,7 +3159,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
 
-	if (sk_under_memory_pressure(sk) &&
+	if (sk_under_global_memory_pressure(sk) &&
 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
 		sk_leave_memory_pressure(sk);
 }
@@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		xfrm_decode_session(skb, &fl, AF_INET);
 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+		xfrm_decode_session(skb, &fl, AF_INET);
 		break;
 	case htons(ETH_P_IPV6):
-		xfrm_decode_session(skb, &fl, AF_INET6);
 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+		xfrm_decode_session(skb, &fl, AF_INET6);
 		break;
 	default:
 		goto tx_err;
@@ -591,7 +591,9 @@ out_reset_timer:
 	    tcp_stream_is_thin(tp) &&
 	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
 		icsk->icsk_backoff = 0;
-		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
+				       tcp_rto_min(sk),
+				       TCP_RTO_MAX);
 	} else if (sk->sk_state != TCP_SYN_SENT ||
 		   icsk->icsk_backoff >
 		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
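This is the 1 ms-retransmit fix from the summary: min() only capped the recomputed RTO from above, so a tiny measured RTT on a thin stream could collapse the timer to a single jiffy. clamp(val, lo, hi) bounds it on both sides and is equivalent to min(max(val, lo), hi). A trivial userspace illustration of the semantics, with made-up bounds:

	#include <stdio.h>

	#define clamp(val, lo, hi) \
		((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

	int main(void)
	{
		int lo = 200, hi = 120000;	/* illustrative min/max RTO, ms */

		printf("%d\n", clamp(1, lo, hi));	/* 200: raised to the floor */
		printf("%d\n", clamp(500, lo, hi));	/* 500: left alone */
		printf("%d\n", clamp(500000, lo, hi));	/* 120000: capped */
		return 0;
	}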
@@ -152,7 +152,7 @@ config INET6_TUNNEL
 	default n
 
 config IPV6_VTI
-tristate "Virtual (secure) IPv6: tunneling"
+	tristate "Virtual (secure) IPv6: tunneling"
 	select IPV6_TUNNEL
 	select NET_IP_TUNNEL
 	select XFRM
@@ -568,12 +568,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 		    vti6_addr_conflict(t, ipv6_hdr(skb)))
 			goto tx_err;
 
-		xfrm_decode_session(skb, &fl, AF_INET6);
 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+		xfrm_decode_session(skb, &fl, AF_INET6);
 		break;
 	case htons(ETH_P_IP):
-		xfrm_decode_session(skb, &fl, AF_INET);
 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+		xfrm_decode_session(skb, &fl, AF_INET);
 		break;
 	default:
 		goto tx_err;
@@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
 	if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
 		struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
 
-		if ((xfilter->sadb_x_filter_splen >=
+		if ((xfilter->sadb_x_filter_splen >
 		     (sizeof(xfrm_address_t) << 3)) ||
-		    (xfilter->sadb_x_filter_dplen >=
+		    (xfilter->sadb_x_filter_dplen >
 		     (sizeof(xfrm_address_t) << 3))) {
 			mutex_unlock(&pfk->dump_lock);
 			return -EINVAL;
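The comparison flip above is an off-by-one repair: splen and dplen are prefix lengths in bits, and sizeof(xfrm_address_t) << 3 is 128, the full width of an IPv6 address, so an exact-host /128 filter is valid and only strictly larger values should be rejected. The same bound reappears in the xfrm_dump_sa() hunk further down. A standalone sketch of the check, with hypothetical names:

	#include <errno.h>

	typedef union {
		unsigned char a6[16];	/* 128-bit address, as in xfrm */
	} demo_xfrm_address_t;

	static int demo_validate_prefix(unsigned int splen, unsigned int dplen)
	{
		const unsigned int max_bits = sizeof(demo_xfrm_address_t) << 3;

		/* A prefix may legitimately cover all 128 bits; reject only
		 * lengths beyond the address width.
		 */
		if (splen > max_bits || dplen > max_bits)
			return -EINVAL;
		return 0;
	}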
@@ -1876,6 +1876,7 @@ static int
 proc_do_sync_threshold(struct ctl_table *table, int write,
 		       void *buffer, size_t *lenp, loff_t *ppos)
 {
+	struct netns_ipvs *ipvs = table->extra2;
 	int *valp = table->data;
 	int val[2];
 	int rc;
@@ -1885,6 +1886,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
 		.mode = table->mode,
 	};
 
+	mutex_lock(&ipvs->sync_mutex);
 	memcpy(val, valp, sizeof(val));
 	rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
 	if (write) {
@@ -1894,6 +1896,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
 		else
 			memcpy(valp, val, sizeof(val));
 	}
+	mutex_unlock(&ipvs->sync_mutex);
 	return rc;
 }
 
@@ -4321,6 +4324,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
 	ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
 	ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
 	tbl[idx].data = &ipvs->sysctl_sync_threshold;
+	tbl[idx].extra2 = ipvs;
 	tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
 	ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
 	tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
@@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
 	[SCTP_CONNTRACK_COOKIE_WAIT]		= 3 SECS,
 	[SCTP_CONNTRACK_COOKIE_ECHOED]		= 3 SECS,
 	[SCTP_CONNTRACK_ESTABLISHED]		= 210 SECS,
-	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 300 SECS / 1000,
-	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 300 SECS / 1000,
+	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 3 SECS,
+	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 3 SECS,
 	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= 3 SECS,
 	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= 30 SECS,
 };
@@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 	{
 /* ORIGINAL */
 /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
-/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
+/* init         */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW},
/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
 /* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
 /* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
@@ -7091,6 +7091,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
 		ret = __nft_set_catchall_flush(ctx, set, &elem);
 		if (ret < 0)
 			break;
+		nft_set_elem_change_active(ctx->net, set, ext);
 	}
 
 	return ret;
@@ -9480,9 +9481,14 @@ struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
 	if (!trans)
 		return NULL;
 
+	trans->net = maybe_get_net(net);
+	if (!trans->net) {
+		kfree(trans);
+		return NULL;
+	}
+
 	refcount_inc(&set->refs);
 	trans->set = set;
-	trans->net = get_net(net);
 	trans->seq = gc_seq;
 
 	return trans;
@@ -9738,6 +9744,22 @@ static void nft_set_commit_update(struct list_head *set_update_list)
 	}
 }
 
+static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net)
+{
+	unsigned int gc_seq;
+
+	/* Bump gc counter, it becomes odd, this is the busy mark. */
+	gc_seq = READ_ONCE(nft_net->gc_seq);
+	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
+
+	return gc_seq;
+}
+
+static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq)
+{
+	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
+}
+
 static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 {
 	struct nftables_pernet *nft_net = nft_pernet(net);
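nft_gc_seq_begin()/nft_gc_seq_end() implement a seqcount-style busy mark: the per-netns counter is bumped to an odd value while commit, netns teardown, or the netlink-release path is mutating state, and back to even afterwards, so an asynchronous GC transaction created against a stale or in-flight sequence can be discarded. A minimal standalone sketch of the idea, with illustrative names rather than the nftables structures (the kernel helpers additionally use READ_ONCE/WRITE_ONCE for lockless observers):

	/* Writer: make the sequence odd for the duration of the critical work. */
	static unsigned int demo_seq_begin(unsigned int *seq)
	{
		unsigned int s = *seq;

		*seq = ++s;		/* even -> odd: busy */
		return s;
	}

	static void demo_seq_end(unsigned int *seq, unsigned int s)
	{
		*seq = ++s;		/* odd -> even: idle again */
	}

	/* Observer: a recorded snapshot is stale if it was taken while busy
	 * (odd) or if the sequence has moved on since.
	 */
	static int demo_seq_is_stale(const unsigned int *seq, unsigned int snap)
	{
		return (snap & 1) || *seq != snap;
	}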
@@ -9823,9 +9845,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 
 	WRITE_ONCE(nft_net->base_seq, base_seq);
 
-	/* Bump gc counter, it becomes odd, this is the busy mark. */
-	gc_seq = READ_ONCE(nft_net->gc_seq);
-	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
+	gc_seq = nft_gc_seq_begin(nft_net);
 
 	/* step 3. Start new generation, rules_gen_X now in use. */
 	net->nft.gencursor = nft_gencursor_next(net);
@@ -10038,7 +10058,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
 	nf_tables_commit_audit_log(&adl, nft_net->base_seq);
 
-	WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
+	nft_gc_seq_end(nft_net, gc_seq);
 	nf_tables_commit_release(net);
 
 	return 0;
@@ -11039,6 +11059,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
 	struct net *net = n->net;
 	unsigned int deleted;
 	bool restart = false;
+	unsigned int gc_seq;
 
 	if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
 		return NOTIFY_DONE;
@@ -11046,6 +11067,9 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
 	nft_net = nft_pernet(net);
 	deleted = 0;
 	mutex_lock(&nft_net->commit_mutex);
+
+	gc_seq = nft_gc_seq_begin(nft_net);
+
 	if (!list_empty(&nf_tables_destroy_list))
 		rcu_barrier();
 again:
@@ -11068,6 +11092,8 @@ again:
 		if (restart)
 			goto again;
 	}
+	nft_gc_seq_end(nft_net, gc_seq);
+
 	mutex_unlock(&nft_net->commit_mutex);
 
 	return NOTIFY_DONE;
@@ -11105,12 +11131,20 @@ static void __net_exit nf_tables_pre_exit_net(struct net *net)
 static void __net_exit nf_tables_exit_net(struct net *net)
 {
 	struct nftables_pernet *nft_net = nft_pernet(net);
+	unsigned int gc_seq;
 
 	mutex_lock(&nft_net->commit_mutex);
+
+	gc_seq = nft_gc_seq_begin(nft_net);
+
 	if (!list_empty(&nft_net->commit_list) ||
 	    !list_empty(&nft_net->module_list))
 		__nf_tables_abort(net, NFNL_ABORT_NONE);
+
 	__nft_release_tables(net);
+
+	nft_gc_seq_end(nft_net, gc_seq);
+
 	mutex_unlock(&nft_net->commit_mutex);
 	WARN_ON_ONCE(!list_empty(&nft_net->tables));
 	WARN_ON_ONCE(!list_empty(&nft_net->module_list));
@@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	if (IS_ERR(set))
 		return PTR_ERR(set);
 
+	if (set->flags & NFT_SET_OBJECT)
+		return -EOPNOTSUPP;
+
 	if (set->ops->update == NULL)
 		return -EOPNOTSUPP;
 
@@ -566,6 +566,8 @@ next_match:
 			goto out;
 
 		if (last) {
+			if (nft_set_elem_expired(&f->mt[b].e->ext))
+				goto next_match;
 			if ((genmask &&
 			     !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
 				goto next_match;
@@ -600,17 +602,8 @@ out:
 static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
 			    const struct nft_set_elem *elem, unsigned int flags)
 {
-	struct nft_pipapo_elem *ret;
-
-	ret = pipapo_get(net, set, (const u8 *)elem->key.val.data,
+	return pipapo_get(net, set, (const u8 *)elem->key.val.data,
 			  nft_genmask_cur(net));
-	if (IS_ERR(ret))
-		return ret;
-
-	if (nft_set_elem_expired(&ret->ext))
-		return ERR_PTR(-ENOENT);
-
-	return ret;
 }
 
 /**
@@ -1549,7 +1542,7 @@ static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
 
 /**
  * pipapo_gc() - Drop expired entries from set, destroy start and end elements
- * @set: nftables API set representation
+ * @_set: nftables API set representation
  * @m: Matching data
 */
 static void pipapo_gc(const struct nft_set *_set, struct nft_pipapo_match *m)
@@ -1697,6 +1690,17 @@ static void nft_pipapo_commit(const struct nft_set *set)
 	priv->clone = new_clone;
 }
 
+static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
+{
+#ifdef CONFIG_PROVE_LOCKING
+	const struct net *net = read_pnet(&set->net);
+
+	return lockdep_is_held(&nft_pernet(net)->commit_mutex);
+#else
+	return true;
+#endif
+}
+
 static void nft_pipapo_abort(const struct nft_set *set)
 {
 	struct nft_pipapo *priv = nft_set_priv(set);
@@ -1705,7 +1709,7 @@ static void nft_pipapo_abort(const struct nft_set *set)
 	if (!priv->dirty)
 		return;
 
-	m = rcu_dereference(priv->match);
+	m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
 
 	new_clone = pipapo_clone(m);
 	if (IS_ERR(new_clone))
@@ -1732,11 +1736,7 @@ static void nft_pipapo_activate(const struct net *net,
 				const struct nft_set *set,
 				const struct nft_set_elem *elem)
 {
-	struct nft_pipapo_elem *e;
-
-	e = pipapo_get(net, set, (const u8 *)elem->key.val.data, 0);
-	if (IS_ERR(e))
-		return;
+	struct nft_pipapo_elem *e = elem->priv;
 
 	nft_set_elem_change_active(net, set, &e->ext);
 }
@@ -1950,10 +1950,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
 
 	data = (const u8 *)nft_set_ext_key(&e->ext);
 
-	e = pipapo_get(net, set, data, 0);
-	if (IS_ERR(e))
-		return;
-
 	while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
 		union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
 		const u8 *match_start, *match_end;
@@ -1829,7 +1829,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	parms.port_no = OVSP_LOCAL;
 	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
 	parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
-				? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0;
+				? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0;
 
 	/* So far only local changes have been made, now need the lock. */
 	ovs_lock();
@@ -2049,7 +2049,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
 	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
 	[OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
 		PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
-	[OVS_DP_ATTR_IFINDEX] = {.type = NLA_U32 },
+	[OVS_DP_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
 };
 
 static const struct genl_small_ops dp_datapath_genl_ops[] = {
@@ -2302,7 +2302,7 @@ restart:
 	parms.port_no = port_no;
 	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
 	parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX]
-				? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
+				? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
 
 	vport = new_vport(&parms);
 	err = PTR_ERR(vport);
@@ -2539,7 +2539,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
 	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
 	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
 	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
-	[OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
+	[OVS_VPORT_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
 	[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
 	[OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
 };
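Both policy changes take the same route: an ifindex is a signed 32-bit quantity, so the attribute is declared NLA_S32 with a floor of 0 and the generic netlink policy core rejects negative values before any handler runs, while nla_get_s32() replaces nla_get_u32() at the use sites. A sketch of such a policy entry with a hypothetical attribute set, not the OVS one:

	#include <net/netlink.h>

	enum {
		DEMO_ATTR_UNSPEC,
		DEMO_ATTR_IFINDEX,
		__DEMO_ATTR_MAX,
	};
	#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

	/* NLA_POLICY_MIN(NLA_S32, 0) makes attribute parsing fail for
	 * negative ifindexes, so handlers only ever see values >= 0.
	 */
	static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
		[DEMO_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
	};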
@@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
 	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
 	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
 	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
-	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
+	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
 	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
 	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
 	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
@@ -180,6 +180,8 @@ static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
 	int optlen = 0;
 	int err = -EINVAL;
 
+	skb->protocol = htons(ETH_P_IP);
+
 	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
 		struct ip_beet_phdr *ph;
 		int phlen;
@@ -232,6 +234,8 @@ static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err = -EINVAL;
 
+	skb->protocol = htons(ETH_P_IP);
+
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto out;
 
@@ -267,6 +271,8 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err = -EINVAL;
 
+	skb->protocol = htons(ETH_P_IPV6);
+
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 		goto out;
 
@@ -296,6 +302,8 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
 	int size = sizeof(struct ipv6hdr);
 	int err;
 
+	skb->protocol = htons(ETH_P_IPV6);
+
 	err = skb_cow_head(skb, size + skb->mac_len);
 	if (err)
 		goto out;
@@ -346,6 +354,7 @@ xfrm_inner_mode_encap_remove(struct xfrm_state *x,
 			return xfrm6_remove_tunnel_encap(x, skb);
 		break;
 	}
+	return -EINVAL;
 }
 
 	WARN_ON_ONCE(1);
@@ -366,19 +375,6 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
 		return -EAFNOSUPPORT;
 	}
 
-	switch (XFRM_MODE_SKB_CB(skb)->protocol) {
-	case IPPROTO_IPIP:
-	case IPPROTO_BEETPH:
-		skb->protocol = htons(ETH_P_IP);
-		break;
-	case IPPROTO_IPV6:
-		skb->protocol = htons(ETH_P_IPV6);
-		break;
-	default:
-		WARN_ON_ONCE(1);
-		break;
-	}
-
 	return xfrm_inner_mode_encap_remove(x, skb);
 }
 
@@ -537,8 +537,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IPV6):
-		xfrm_decode_session(skb, &fl, AF_INET6);
 		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+		xfrm_decode_session(skb, &fl, AF_INET6);
 		if (!dst) {
 			fl.u.ip6.flowi6_oif = dev->ifindex;
 			fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
@@ -552,8 +552,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 		break;
 	case htons(ETH_P_IP):
-		xfrm_decode_session(skb, &fl, AF_INET);
 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+		xfrm_decode_session(skb, &fl, AF_INET);
 		if (!dst) {
 			struct rtable *rt;
 
@@ -1324,12 +1324,8 @@ found:
 			struct xfrm_dev_offload *xso = &x->xso;
 
 			if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
-				xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
-				xso->dir = 0;
-				netdev_put(xso->dev, &xso->dev_tracker);
-				xso->dev = NULL;
-				xso->real_dev = NULL;
-				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+				xfrm_dev_state_delete(x);
+				xfrm_dev_state_free(x);
 			}
 #endif
 			x->km.state = XFRM_STATE_DEAD;
@@ -628,7 +628,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
 	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
 	struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
 
-	if (re) {
+	if (re && x->replay_esn && x->preplay_esn) {
 		struct xfrm_replay_state_esn *replay_esn;
 		replay_esn = nla_data(re);
 		memcpy(x->replay_esn, replay_esn,
@@ -1267,6 +1267,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
 				       sizeof(*filter), GFP_KERNEL);
 		if (filter == NULL)
 			return -ENOMEM;
+
+		/* see addr_match(), (prefix length >> 5) << 2
+		 * will be used to compare xfrm_address_t
+		 */
+		if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
+		    filter->dplen > (sizeof(xfrm_address_t) << 3)) {
+			kfree(filter);
+			return -EINVAL;
+		}
 	}
 
 	if (attrs[XFRMA_PROTO])
@@ -2336,6 +2345,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 					    NETLINK_CB(skb).portid);
 		}
 	} else {
+		xfrm_dev_policy_delete(xp);
 		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
 
 		if (err != 0)
@@ -3015,7 +3025,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
 	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
 	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
 	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
-	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
+	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
 	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
 	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
 	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
@@ -3035,6 +3045,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
 	[XFRMA_SET_MARK]	= { .type = NLA_U32 },
 	[XFRMA_SET_MARK_MASK]	= { .type = NLA_U32 },
 	[XFRMA_IF_ID]		= { .type = NLA_U32 },
+	[XFRMA_MTIMER_THRESH]	= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL_GPL(xfrma_policy);
 
@@ -72,7 +72,8 @@ test_span_gre_ttl()
 
 	RET=0
 
-	mirror_install $swp1 ingress $tundev "matchall $tcflags"
+	mirror_install $swp1 ingress $tundev \
+		"prot ip flower $tcflags ip_prot icmp"
 	tc filter add dev $h3 ingress pref 77 prot $prot \
 		flower skip_hw ip_ttl 50 action pass
 