Networking fixes for 6.2-rc8, including fixes from can and ipsec subtrees
 
 Current release - regressions:
 
  - sched: fix off by one in htb_activate_prios()
 
  - eth: mana: fix accessing freed irq affinity_hint
 
  - eth: ice: fix out-of-bounds KASAN warning in virtchnl
 
 Current release - new code bugs:
 
  - eth: mtk_eth_soc: enable special tag when any MAC uses DSA
 
 Previous releases - always broken:
 
  - core: fix sk->sk_txrehash default
 
  - neigh: make sure used and confirmed times are valid
 
  - mptcp: be careful on subflow status propagation on errors
 
  - xfrm: prevent potential spectre v1 gadget in xfrm_xlate32_attr()
 
  - phylink: move phy_device_free() to correctly release phy device
 
  - eth: mlx5:
    - fix crash unsetting rx-vlan-filter in switchdev mode
    - fix hang on firmware reset
    - serialize module cleanup with reload and remove
 
 Signed-off-by: Paolo Abeni <pabeni@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQJGBAABCAAwFiEEg1AjqC77wbdLX2LbKSR5jcyPE6QFAmPlBMISHHBhYmVuaUBy
 ZWRoYXQuY29tAAoJECkkeY3MjxOkAZsQAJl3UBV0kKfxcuNNf4Qwrphpcg4TiZzC
 wD21ebpjG5MzULQ/r0J0Ry3ZlQJmLMs6l2zM3QO6U6obHxzSQ5BrhCHIv4lw6ODC
 Zfv3a82wVVTd123Zykx+bd8jz2uV5nIps5MtBmf7Dqm1ldVvpNAOnXSxVi52UCk5
 gXDYtYqGmO+ZU1IUJxUrRoPPCTGEnfRQvU23Cm7WgzUHjDh7yKihdRVajLKUxVq4
 9mEE9mwu7tf7AVMQ/oMCCPyh8PVAXkzCEoCm5V9MO4CApS4jgSGkX3OEjDKtEwUA
 efhES/3OLNCaPXauc6xkudXzooYoYpa1L0m/91pIHw4+ur3pueDpmBYfpC9cmPy6
 JJtpT47RWGSXaRiP8RpzGKhKhTf33T9lGYm6PRelUr+x/dnaM87u+IwKDr0nb9+H
 P0MnoI8pcaLpEFlwpTzGiNS8vJw1bC5NBTZuoSSJSxT8ItRwgzh7txfHAveR3etW
 Nue726RG/9rTyukG7zBdZVpqffe8r2mybar82Ns+zIsU8x4ZKTNj7WIhMNx0tL0/
 26tS5BdL4DqkDKFFJ+HJDXLpYCt7KWvG1k78xBdBBsQ/CjcBi3Nhk7NyMni5FTsa
 qnZd1OR4ruUHeFDqz+3q/PfxLncfRr/ZlSIF1ogq3ij5M0CoxwAWSrgh4tch7XsL
 YtdbL1xpTSk/
 =sJO+
 -----END PGP SIGNATURE-----

Merge tag 'net-6.2-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from can and ipsec subtrees.

  Current release - regressions:

   - sched: fix off by one in htb_activate_prios()

   - eth: mana: fix accessing freed irq affinity_hint

   - eth: ice: fix out-of-bounds KASAN warning in virtchnl

  Current release - new code bugs:

   - eth: mtk_eth_soc: enable special tag when any MAC uses DSA

  Previous releases - always broken:

   - core: fix sk->sk_txrehash default

   - neigh: make sure used and confirmed times are valid

   - mptcp: be careful on subflow status propagation on errors

   - xfrm: prevent potential spectre v1 gadget in xfrm_xlate32_attr()

   - phylink: move phy_device_free() to correctly release phy device

   - eth: mlx5:
      - fix crash unsetting rx-vlan-filter in switchdev mode
      - fix hang on firmware reset
      - serialize module cleanup with reload and remove"

* tag 'net-6.2-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (57 commits)
  selftests: forwarding: lib: quote the sysctl values
  net: mscc: ocelot: fix all IPv6 getting trapped to CPU when PTP timestamping is used
  rds: rds_rm_zerocopy_callback() use list_first_entry()
  net: txgbe: Update support email address
  selftests: Fix failing VXLAN VNI filtering test
  selftests: mptcp: stop tests earlier
  selftests: mptcp: allow more slack for slow test-case
  mptcp: be careful on subflow status propagation on errors
  mptcp: fix locking for in-kernel listener creation
  mptcp: fix locking for setsockopt corner-case
  mptcp: do not wait for bare sockets' timeout
  net: ethernet: mtk_eth_soc: fix DSA TX tag hwaccel for switch port 0
  nfp: ethtool: fix the bug of setting unsupported port speed
  txhash: fix sk->sk_txrehash default
  net: ethernet: mtk_eth_soc: fix wrong parameters order in __xdp_rxq_info_reg()
  net: ethernet: mtk_eth_soc: enable special tag when any MAC uses DSA
  net: sched: sch: Fix off by one in htb_activate_prios()
  igc: Add ndo_tx_timeout support
  net: mana: Fix accessing freed irq affinity_hint
  hv_netvsc: Allocate memory in netvsc_dma_map() with GFP_ATOMIC
  ...
Linus Torvalds 2023-02-09 09:17:38 -08:00
commit 35674e7875
65 changed files with 798 additions and 353 deletions


@ -16,5 +16,5 @@ Contents
Support
=======
If you got any problem, contact Wangxun support team via support@trustnetic.com
If you got any problem, contact Wangxun support team via nic-support@net-swift.com
and Cc: netdev.


@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
if (d) {
if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");


@ -1309,14 +1309,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
}
/* Set the port as a user port which is to be able to recognize VID
* from incoming packets before fetching entry within the VLAN table.
*/
mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
VLAN_ATTR(MT7530_VLAN_USER) |
PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
/* Set the port as a user port which is to be able to recognize
* VID from incoming packets before fetching entry within the
* VLAN table.
*/
mt7530_rmw(priv, MT7530_PVC_P(port),
VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
VLAN_ATTR(MT7530_VLAN_USER) |
PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
} else {
/* Also set CPU ports to the "user" VLAN port attribute, to
* allow VLAN classification, but keep the EG_TAG attribute as
* "consistent" (i.o.w. don't change its value) for packets
* received by the switch from the CPU, so that tagged packets
* are forwarded to user ports as tagged, and untagged as
* untagged.
*/
mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
VLAN_ATTR(MT7530_VLAN_USER));
}
}
static void


@ -4627,25 +4627,26 @@ static int init_reset_optional(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
}
ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
if (!ret) {
u32 pm_info[2];
ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
if (!ret) {
u32 pm_info[2];
ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
pm_info, ARRAY_SIZE(pm_info));
if (ret) {
dev_err(&pdev->dev, "Failed to read power management information\n");
goto err_out_phy_exit;
ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
pm_info, ARRAY_SIZE(pm_info));
if (ret) {
dev_err(&pdev->dev, "Failed to read power management information\n");
goto err_out_phy_exit;
}
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
if (ret)
goto err_out_phy_exit;
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
if (ret)
goto err_out_phy_exit;
}
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
if (ret)
goto err_out_phy_exit;
ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
if (ret)
goto err_out_phy_exit;
}
/* Fully reset controller at hardware level if mapped in device tree */


@ -5524,7 +5524,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
* returned by the firmware is a 16 bit * value, but is indexed
* by [fls(speed) - 1]
*/
static const u32 ice_aq_to_link_speed[15] = {
static const u32 ice_aq_to_link_speed[] = {
SPEED_10, /* BIT(0) */
SPEED_100,
SPEED_1000,
@ -5536,10 +5536,6 @@ static const u32 ice_aq_to_link_speed[15] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
0,
0,
0,
0 /* BIT(14) */
};
/**
@ -5550,5 +5546,8 @@ static const u32 ice_aq_to_link_speed[15] = {
*/
u32 ice_get_link_speed(u16 index)
{
if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
return 0;
return ice_aq_to_link_speed[index];
}
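The two ice changes in this area share one pattern: firmware reports link speed as a BIT() value, fls(speed) - 1 turns that into a table index, and the lookup is now guarded with ARRAY_SIZE() instead of padding the table with zero entries up to BIT(14). A minimal userspace sketch of that pattern (values and names are illustrative; fls() and ARRAY_SIZE() here are stand-ins for the kernel helpers):

```c
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Table indexed by fls(speed_bit) - 1; no trailing zero padding needed. */
static const unsigned int aq_to_link_speed[] = {
	10, 100, 1000, 2500, 5000, 10000, 20000, 25000, 40000, 50000, 100000,
};

static unsigned int get_link_speed(unsigned int index)
{
	if (index >= ARRAY_SIZE(aq_to_link_speed))
		return 0;	/* out-of-range index reports "unknown" instead of reading past the table */
	return aq_to_link_speed[index];
}

int main(void)
{
	unsigned int speed_bit = 1u << 10;	/* BIT(10), i.e. 100G in this toy table */
	unsigned int idx = fls(speed_bit) - 1;	/* BIT() value -> array index */

	printf("index %u -> %u Mb/s\n", idx, get_link_speed(idx));
	printf("index 14 -> %u Mb/s\n", get_link_speed(14));	/* now safely 0 */
	return 0;
}
```

The same guard is what ice_conv_link_speed_to_virtchnl() gains further down via the ARRAY_SIZE() check on its legacy speed table.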


@ -5541,7 +5541,7 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;


@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
goto err_free_lkup_exts;
goto err_unroll;
/* Group match words into recipes using preferred recipe grouping
* criteria.


@ -1681,7 +1681,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
struct ice_vsi *ch_vsi = NULL;
u16 queue = act->rx_queue;
if (queue > vsi->num_rxq) {
if (queue >= vsi->num_rxq) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because specified queue is invalid");
return -EINVAL;


@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
static const u32 ice_legacy_aq_to_vc_speed[15] = {
static const u32 ice_legacy_aq_to_vc_speed[] = {
VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
VIRTCHNL_LINK_SPEED_100MB,
VIRTCHNL_LINK_SPEED_1GB,
@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN,
VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
u32 speed;
/* convert a BIT() value into an array index */
u32 index = fls(link_speed) - 1;
if (adv_link_support) {
/* convert a BIT() value into an array index */
speed = ice_get_link_speed(fls(link_speed) - 1);
} else {
if (adv_link_support)
return ice_get_link_speed(index);
else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
}
return ice_legacy_aq_to_vc_speed[index];
return speed;
return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
/* The mailbox overflow detection algorithm helps to check if there


@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
/* outer VLAN ops regardless of port VLAN config */
vlan_ops->add_vlan = ice_vsi_add_vlan;
vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
if (ice_vf_is_port_vlan_ena(vf)) {
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
/* all Rx traffic should be in the domain of the
* assigned port VLAN, so prevent disabling Rx VLAN
* filtering
*/
vlan_ops->dis_rx_filtering = noop_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
} else {
vlan_ops->dis_rx_filtering =
ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
/* all Rx traffic should be in the domain of the
* assigned port VLAN, so prevent disabling Rx VLAN
* filtering
*/
vlan_ops->dis_rx_filtering = noop_vlan;
} else {
vlan_ops->dis_rx_filtering =
ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else


@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
!(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
(rd32(IGC_TDH(tx_ring->reg_idx)) !=
readl(tx_ring->tail))) {
/* detected Tx unit hang */
netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n"
@ -5068,6 +5070,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
/**
* igc_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
* @txqueue: queue number that timed out
**/
static void igc_tx_timeout(struct net_device *netdev,
unsigned int __always_unused txqueue)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
/* Do the reset outside of interrupt context */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
wr32(IGC_EICS,
(adapter->eims_enable_mask & ~adapter->eims_other));
}
/**
* igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
case SPEED_100:
case SPEED_1000:
case SPEED_2500:
adapter->tx_timeout_factor = 7;
adapter->tx_timeout_factor = 1;
break;
}
@ -6320,6 +6340,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_tx_timeout = igc_tx_timeout,
.ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,


@ -1570,8 +1570,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
id, PAGE_SIZE);
err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@ -1870,7 +1870,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
while (done < budget) {
unsigned int pktlen, *rxdcsum;
bool has_hwaccel_tag = false;
struct net_device *netdev;
u16 vlan_proto, vlan_tci;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@ -2010,27 +2012,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
if (trxd.rxd3 & RX_DMA_VTAG_V2)
__vlan_hwaccel_put_tag(skb,
htons(RX_DMA_VPID(trxd.rxd4)),
RX_DMA_VID(trxd.rxd4));
if (trxd.rxd3 & RX_DMA_VTAG_V2) {
vlan_proto = RX_DMA_VPID(trxd.rxd4);
vlan_tci = RX_DMA_VID(trxd.rxd4);
has_hwaccel_tag = true;
}
} else if (trxd.rxd2 & RX_DMA_VTAG) {
__vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
RX_DMA_VID(trxd.rxd3));
vlan_proto = RX_DMA_VPID(trxd.rxd3);
vlan_tci = RX_DMA_VID(trxd.rxd3);
has_hwaccel_tag = true;
}
}
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
unsigned int port = vlan_proto & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
__vlan_hwaccel_clear_tag(skb);
} else if (has_hwaccel_tag) {
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}
skb_record_rx_queue(skb, 0);
@ -3111,7 +3115,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@ -3177,8 +3181,7 @@ static int mtk_open(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
int i, err;
if ((mtk_uses_dsa(dev) && !eth->prog) &&
!(mac->id == 1 && MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_TRGMII))) {
if (mtk_uses_dsa(dev) && !eth->prog) {
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
@ -3195,8 +3198,7 @@ static int mtk_open(struct net_device *dev)
}
} else {
/* Hardware special tag parsing needs to be disabled if at least
* one MAC does not use DSA, or the second MAC of the MT7621 and
* MT7623 SoCs is being used.
* one MAC does not use DSA.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;


@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
pages = dev->priv.dbg.pages_debugfs;
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,


@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
MLX5_GET(mtrc_cap, out, num_string_trace);
tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
tracer->str_db.loaded = false;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@ -756,6 +757,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
tracer->buff.consumer_index = 0;
return err;
}
@ -820,7 +822,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
tracer->owner = false;
tracer->buff.consumer_index = 0;
return;
}


@ -87,7 +87,7 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_host_pf_cleanup(dev);
err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}


@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
/* only handle the event on native eswtich of representor */
if (!mlx5_esw_bridge_is_local(dev, rep, esw))
break;
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);


@ -443,7 +443,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
if (fs->vlan->cvlan_filter_disabled)
if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
return;
fs->vlan->cvlan_filter_disabled = true;


@ -591,7 +591,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->ix = c->ix;
rq->channel = c;
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->hw_mtu =
MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
@ -1014,35 +1015,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
return mlx5e_rq_to_ready(rq, curr_state);
}
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
MLX5_SET(rqc, rqc, scatter_fcs, enable);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
return err;
}
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
struct mlx5_core_dev *mdev = rq->mdev;
@ -3314,20 +3286,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
int err = 0;
int i;
for (i = 0; i < chs->num; i++) {
err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
if (err)
return err;
}
return 0;
}
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
int err;
@ -3903,41 +3861,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
{
struct mlx5_core_dev *mdev = priv->mdev;
bool enable = *(bool *)ctx;
return mlx5e_set_rx_port_ts(mdev, enable);
}
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *chs = &priv->channels;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
int err;
mutex_lock(&priv->state_lock);
if (enable) {
err = mlx5e_set_rx_port_ts(mdev, false);
if (err)
goto out;
chs->params.scatter_fcs_en = true;
err = mlx5e_modify_channels_scatter_fcs(chs, true);
if (err) {
chs->params.scatter_fcs_en = false;
mlx5e_set_rx_port_ts(mdev, true);
}
} else {
chs->params.scatter_fcs_en = false;
err = mlx5e_modify_channels_scatter_fcs(chs, false);
if (err) {
chs->params.scatter_fcs_en = true;
goto out;
}
err = mlx5e_set_rx_port_ts(mdev, true);
if (err) {
mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
err = 0;
}
}
out:
new_params = chs->params;
new_params.scatter_fcs_en = enable;
err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
&new_params.scatter_fcs_en, true);
mutex_unlock(&priv->state_lock);
return err;
}
@ -4074,6 +4018,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
return features;
}


@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct mlx5_esw_bridge *bridge;
port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
if (!port)
return;
bridge = port->bridge;


@ -189,16 +189,16 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
}
}
static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
if (rate < 0)
return -EINVAL;
return SPEED_UNKNOWN;
width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
if (width < 0)
return -EINVAL;
return SPEED_UNKNOWN;
return rate * width;
}
@ -221,16 +221,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
if (speed < 0)
return -EINVAL;
link_ksettings->base.speed = speed;
link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
link_ksettings->base.duplex = DUPLEX_FULL;
link_ksettings->base.port = PORT_OTHER;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
link_ksettings->base.speed = speed;
return 0;
}


@ -2110,7 +2110,7 @@ static int __init mlx5_init(void)
mlx5_core_verify_params();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
err = mlx5e_init();
if (err)
goto err_debug;
@ -2118,16 +2118,16 @@ static int __init mlx5_init(void)
if (err)
goto err_sf;
err = mlx5e_init();
err = pci_register_driver(&mlx5_core_driver);
if (err)
goto err_en;
goto err_pci;
return 0;
err_en:
err_pci:
mlx5_sf_driver_unregister();
err_sf:
pci_unregister_driver(&mlx5_core_driver);
mlx5e_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
@ -2135,9 +2135,9 @@ err_debug:
static void __exit mlx5_cleanup(void)
{
mlx5e_cleanup();
mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_sf_driver_unregister();
mlx5e_cleanup();
mlx5_unregister_debugfs();
}


@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
return (u32)func_id | (ec_function << 16);
}
static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
if (!func_id)
return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
}
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
@ -332,6 +340,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
int notify_fail = event;
u16 func_type;
u64 addr;
int err;
u32 *in;
@ -383,11 +392,9 @@ retry:
goto out_dropped;
}
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] += npages;
dev->priv.fw_pages += npages;
if (func_id)
dev->priv.vfs_pages += npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
@ -414,6 +421,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
struct rb_root *root;
struct rb_node *p;
int npages = 0;
u16 func_type;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
@ -428,11 +436,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
free_fwp(dev, fwp, fwp->free_count);
}
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] -= npages;
dev->priv.fw_pages -= npages;
if (func_id)
dev->priv.vfs_pages -= npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
@ -498,6 +504,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
u16 func_type;
u32 *out;
int err;
int i;
@ -549,11 +556,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (nclaimed)
*nclaimed = num_claimed;
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] -= num_claimed;
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
dev->priv.host_pf_pages -= num_claimed;
out_free:
kvfree(out);
@ -706,12 +711,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
WARN(dev->priv.vfs_pages,
WARN(dev->priv.page_counters[MLX5_VF],
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);
WARN(dev->priv.host_pf_pages,
dev->priv.page_counters[MLX5_VF]);
WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.host_pf_pages);
dev->priv.page_counters[MLX5_HOST_PF]);
return 0;
}


@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}


@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
mlx5dr_domain_nic_lock(nic_dmn);
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
return ret;
goto err_unlock;
hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
if (likely(hw_ste_arr_is_opt)) {
@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
DR_STE_SIZE, GFP_KERNEL);
if (!hw_ste_arr)
return -ENOMEM;
if (!hw_ste_arr) {
ret = -ENOMEM;
goto err_unlock;
}
}
mlx5dr_domain_nic_lock(nic_dmn);
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
goto out;
if (unlikely(!hw_ste_arr_is_opt))
kfree(hw_ste_arr);
return 0;
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
@ -1238,12 +1243,12 @@ remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
out:
if (unlikely(!hw_ste_arr_is_opt))
if (!hw_ste_arr_is_opt)
kfree(hw_ste_arr);
err_unlock:
mlx5dr_domain_nic_unlock(nic_dmn);
return ret;
}
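The dr_rule change above is a lock-scope and unwind-ordering fix: the domain lock is now taken before the first step that can fail, and every failure path funnels through labels that release what was taken, in reverse order. A minimal generic sketch of that idiom (plain C with a pthread mutex standing in for the domain lock; names are illustrative, not the mlx5 code):

```c
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;

static int create_rule(size_t n_entries)
{
	char *ste_arr;
	int ret;

	/* Take the lock before anything that can fail, so every failure
	 * path below can simply unlock and return.
	 */
	pthread_mutex_lock(&domain_lock);

	ste_arr = calloc(n_entries, 64);
	if (!ste_arr) {
		ret = -1;
		goto err_unlock;	/* early failures still drop the lock */
	}

	/* ... build the rule under the lock ... */

	pthread_mutex_unlock(&domain_lock);
	free(ste_arr);			/* temporary buffer is not kept on success either */
	return 0;

err_unlock:
	pthread_mutex_unlock(&domain_lock);
	return ret;
}

int main(void)
{
	return create_rule(8) ? 1 : 0;
}
```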


@ -633,7 +633,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
for (i = 0; i < sparx5->port_count; i++) {
for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@ -649,7 +649,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
for (i = 0; i < sparx5->port_count; i++) {
for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;


@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_irq_context *gic;
unsigned int max_irqs;
u16 *cpus;
cpumask_var_t req_mask;
unsigned int max_irqs, cpu;
int nvec, irq;
int err, i = 0, j;
@ -1240,21 +1238,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq_vector;
}
if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
err = -ENOMEM;
goto free_irq;
}
cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
if (!cpus) {
err = -ENOMEM;
goto free_mask;
}
for (i = 0; i < nvec; i++)
cpus[i] = cpumask_local_spread(i, gc->numa_node);
for (i = 0; i < nvec; i++) {
cpumask_set_cpu(cpus[i], req_mask);
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
@ -1269,17 +1253,16 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
goto free_mask;
goto free_irq;
}
err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
if (err)
goto free_mask;
irq_set_affinity_and_hint(irq, req_mask);
cpumask_clear(req_mask);
goto free_irq;
cpu = cpumask_local_spread(i, gc->numa_node);
irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
free_cpumask_var(req_mask);
kfree(cpus);
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
@ -1290,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
return 0;
free_mask:
free_cpumask_var(req_mask);
kfree(cpus);
free_irq:
for (j = i - 1; j >= 0; j--) {
irq = pci_irq_vector(pdev, j);
gic = &gc->irq_contexts[j];
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
@ -1324,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
continue;
gic = &gc->irq_contexts[i];
/* Need to clear the hint before free_irq */
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
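The mana fix above comes down to a lifetime rule: the cpumask passed to irq_set_affinity_and_hint() is stored by the IRQ core as the affinity_hint, so it must either outlive the IRQ or be cleared with irq_update_affinity_hint(irq, NULL) before free_irq(). A hedged kernel-style sketch of the safe pattern, using cpumask_of() (which has static lifetime) instead of a locally allocated mask; the function and structure names are illustrative, not the driver's:

```c
#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Illustrative setup: one IRQ per vector, hint pointing at a static per-CPU mask. */
static int demo_setup_irq(unsigned int irq, int vec, int numa_node,
			  irq_handler_t handler, void *data)
{
	unsigned int cpu;
	int err;

	err = request_irq(irq, handler, 0, "demo", data);
	if (err)
		return err;

	/* cpumask_of() returns a mask with static lifetime, so the stored
	 * affinity_hint can never point at freed memory, unlike a mask
	 * allocated on the stack or freed after setup.
	 */
	cpu = cpumask_local_spread(vec, numa_node);
	irq_set_affinity_and_hint(irq, cpumask_of(cpu));
	return 0;
}

static void demo_teardown_irq(unsigned int irq, void *data)
{
	/* Clear the hint before free_irq(), mirroring the mana fix. */
	irq_update_affinity_hint(irq, NULL);
	free_irq(irq, data);
}
```

The second half of the fix, clearing the hint before free_irq() in the remove path, is exactly what the mana_gd_remove_irqs() hunk above adds.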


@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
flow_rule_match_control(rule, &match);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
filter->key_type = OCELOT_VCAP_KEY_ANY;
filter->vlan.vid.value = match.key->vlan_id;
filter->vlan.vid.mask = match.mask->vlan_id;
filter->vlan.pcp.value[0] = match.key->vlan_priority;
filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
filter->key_type = OCELOT_VCAP_KEY_ANY;
filter->vlan.vid.value = match.key->vlan_id;
filter->vlan.vid.mask = match.mask->vlan_id;
filter->vlan.pcp.value[0] = match.key->vlan_priority;
filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
match_protocol = false;
}
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
if (filter->block_id == VCAP_ES0) {


@ -335,8 +335,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@ -355,8 +355,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}


@ -293,35 +293,131 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
}
}
static const u16 nfp_eth_media_table[] = {
[NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
[NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
[NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
[NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
[NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
[NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
[NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
[NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
[NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
[NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
[NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
[NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
[NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
[NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
[NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
[NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
[NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
[NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
[NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
[NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
[NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
[NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
[NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
[NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
[NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
[NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
[NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
[NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
static const struct nfp_eth_media_link_mode {
u16 ethtool_link_mode;
u16 speed;
} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
[NFP_MEDIA_1000BASE_CX] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = NFP_SPEED_1G,
},
[NFP_MEDIA_1000BASE_KX] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = NFP_SPEED_1G,
},
[NFP_MEDIA_10GBASE_KX4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
.speed = NFP_SPEED_10G,
},
[NFP_MEDIA_10GBASE_KR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
.speed = NFP_SPEED_10G,
},
[NFP_MEDIA_10GBASE_CX4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
.speed = NFP_SPEED_10G,
},
[NFP_MEDIA_10GBASE_CR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
.speed = NFP_SPEED_10G,
},
[NFP_MEDIA_10GBASE_SR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
.speed = NFP_SPEED_10G,
},
[NFP_MEDIA_10GBASE_ER] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
.speed = NFP_SPEED_10G,
},
[NFP_MEDIA_25GBASE_KR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
.speed = NFP_SPEED_25G,
},
[NFP_MEDIA_25GBASE_KR_S] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
.speed = NFP_SPEED_25G,
},
[NFP_MEDIA_25GBASE_CR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
.speed = NFP_SPEED_25G,
},
[NFP_MEDIA_25GBASE_CR_S] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
.speed = NFP_SPEED_25G,
},
[NFP_MEDIA_25GBASE_SR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
.speed = NFP_SPEED_25G,
},
[NFP_MEDIA_40GBASE_CR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
.speed = NFP_SPEED_40G,
},
[NFP_MEDIA_40GBASE_KR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
.speed = NFP_SPEED_40G,
},
[NFP_MEDIA_40GBASE_SR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
.speed = NFP_SPEED_40G,
},
[NFP_MEDIA_40GBASE_LR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
.speed = NFP_SPEED_40G,
},
[NFP_MEDIA_50GBASE_KR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
.speed = NFP_SPEED_50G,
},
[NFP_MEDIA_50GBASE_SR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
.speed = NFP_SPEED_50G,
},
[NFP_MEDIA_50GBASE_CR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
.speed = NFP_SPEED_50G,
},
[NFP_MEDIA_50GBASE_LR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
.speed = NFP_SPEED_50G,
},
[NFP_MEDIA_50GBASE_ER] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
.speed = NFP_SPEED_50G,
},
[NFP_MEDIA_50GBASE_FR] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
.speed = NFP_SPEED_50G,
},
[NFP_MEDIA_100GBASE_KR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
.speed = NFP_SPEED_100G,
},
[NFP_MEDIA_100GBASE_SR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
.speed = NFP_SPEED_100G,
},
[NFP_MEDIA_100GBASE_CR4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
.speed = NFP_SPEED_100G,
},
[NFP_MEDIA_100GBASE_KP4] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
.speed = NFP_SPEED_100G,
},
[NFP_MEDIA_100GBASE_CR10] = {
.ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
.speed = NFP_SPEED_100G,
},
};
static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
[NFP_SPEED_1G] = SPEED_1000,
[NFP_SPEED_10G] = SPEED_10000,
[NFP_SPEED_25G] = SPEED_25000,
[NFP_SPEED_40G] = SPEED_40000,
[NFP_SPEED_50G] = SPEED_50000,
[NFP_SPEED_100G] = SPEED_100000,
};
static void nfp_add_media_link_mode(struct nfp_port *port,
@ -334,8 +430,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
};
struct nfp_cpp *cpp = port->app->cpp;
if (nfp_eth_read_media(cpp, &ethm))
if (nfp_eth_read_media(cpp, &ethm)) {
bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
return;
}
bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
for (u32 i = 0; i < 2; i++) {
supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
@ -344,20 +444,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
if (i < 64) {
if (supported_modes[0] & BIT_ULL(i))
__set_bit(nfp_eth_media_table[i],
if (supported_modes[0] & BIT_ULL(i)) {
__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
__set_bit(nfp_eth_media_table[i].speed,
port->speed_bitmap);
}
if (advertised_modes[0] & BIT_ULL(i))
__set_bit(nfp_eth_media_table[i],
__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
} else {
if (supported_modes[1] & BIT_ULL(i - 64))
__set_bit(nfp_eth_media_table[i],
if (supported_modes[1] & BIT_ULL(i - 64)) {
__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
__set_bit(nfp_eth_media_table[i].speed,
port->speed_bitmap);
}
if (advertised_modes[1] & BIT_ULL(i - 64))
__set_bit(nfp_eth_media_table[i],
__set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
}
}
@ -468,6 +574,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
bool is_supported = false;
for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
if (cmd->base.speed == nfp_eth_speed_map[i] &&
test_bit(i, port->speed_bitmap)) {
is_supported = true;
break;
}
}
if (!is_supported) {
netdev_err(netdev, "Speed %u is not supported.\n",
cmd->base.speed);
err = -EINVAL;
goto err_bad_set;
}
if (req_aneg) {
netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");


@ -38,6 +38,16 @@ enum nfp_port_flags {
NFP_PORT_CHANGED = 0,
};
enum {
NFP_SPEED_1G,
NFP_SPEED_10G,
NFP_SPEED_25G,
NFP_SPEED_40G,
NFP_SPEED_50G,
NFP_SPEED_100G,
NFP_SUP_SPEED_NUMBER
};
/**
* struct nfp_port - structure representing NFP port
* @netdev: backpointer to associated netdev
@ -52,6 +62,7 @@ enum nfp_port_flags {
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
* @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
@ -78,6 +89,7 @@ struct nfp_port {
bool eth_forced;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {


@ -708,9 +708,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
q->lif->index, q->name, q->hw_type, q->hw_index,
q->head_idx, ring_doorbell);
if (ring_doorbell)
if (ring_doorbell) {
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
q->dbell_jiffies = jiffies;
if (q_to_qcq(q)->napi_qcq)
mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
jiffies + IONIC_NAPI_DEADLINE);
}
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)


@ -25,6 +25,12 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@ -216,6 +222,8 @@ struct ionic_queue {
struct ionic_lif *lif;
struct ionic_desc_info *info;
u64 dbval;
unsigned long dbell_deadline;
unsigned long dbell_jiffies;
u16 head_idx;
u16 tail_idx;
unsigned int index;
@ -361,4 +369,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
bool ionic_txq_poke_doorbell(struct ionic_queue *q);
bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
#endif /* _IONIC_DEV_H_ */


@ -16,6 +16,7 @@
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
@ -200,6 +201,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
}
static void ionic_napi_deadline(struct timer_list *timer)
{
struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
napi_schedule(&qcq->napi);
}
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@ -269,6 +277,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
.oper = IONIC_Q_ENABLE,
},
};
int ret;
idev = &lif->ionic->idev;
dev = lif->ionic->dev;
@ -276,16 +285,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
if (qcq->flags & IONIC_QCQ_F_INTR)
ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
ret = ionic_adminq_post_wait(lif, &ctx);
if (ret)
return ret;
if (qcq->napi.poll)
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
napi_enable(&qcq->napi);
ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
return ionic_adminq_post_wait(lif, &ctx);
return 0;
}
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
@ -316,6 +333,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
synchronize_irq(qcq->intr.vector);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communcation error, don't bother with
@ -451,6 +469,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
@ -564,13 +583,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
int q_size, cq_size;
int q_size;
/* q & cq need to be contiguous in case of notifyq */
/* q & cq need to be contiguous in NotifyQ, so alloc it all in q
* and don't alloc qc. We leave new->qc_size and new->qc_base
* as 0 to be sure we don't try to free it later.
*/
q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
new->q_size = PAGE_SIZE + q_size + cq_size;
new->q_size = PAGE_SIZE + q_size +
ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
new->q_base = dma_alloc_coherent(dev, new->q_size,
&new->q_base_pa, GFP_KERNEL);
if (!new->q_base) {
@ -773,8 +794,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
qcq->napi_qcq = qcq;
timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
}
qcq->flags |= IONIC_QCQ_F_INITED;
@ -828,11 +855,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
qcq->napi_qcq = qcq;
timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@ -1150,6 +1183,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@ -1187,6 +1221,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
resched = true;
if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
resched = true;
if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
resched = true;
if (resched)
mod_timer(&lif->adminqcq->napi_deadline,
jiffies + IONIC_NAPI_DEADLINE);
return work_done;
}
@ -3245,8 +3289,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
qcq->napi_qcq = qcq;
timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)


@ -74,8 +74,10 @@ struct ionic_qcq {
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
struct timer_list napi_deadline;
struct napi_struct napi;
unsigned int flags;
struct ionic_qcq *napi_qcq;
struct dentry *dentry;
};


@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q,
complete_all(&ctx->work);
}
bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
{
struct ionic_lif *lif = q->lif;
unsigned long now, then, dif;
unsigned long irqflags;
spin_lock_irqsave(&lif->adminq_lock, irqflags);
if (q->tail_idx == q->head_idx) {
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
return false;
}
now = READ_ONCE(jiffies);
then = q->dbell_jiffies;
dif = now - then;
if (dif > q->dbell_deadline) {
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
q->dbell_jiffies = now;
}
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
return true;
}
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_desc_info *desc_info;


@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
unsigned long now, then, dif;
struct netdev_queue *netdev_txq;
struct net_device *netdev;
netdev = q->lif->netdev;
netdev_txq = netdev_get_tx_queue(netdev, q->index);
HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
if (q->tail_idx == q->head_idx) {
HARD_TX_UNLOCK(netdev, netdev_txq);
return false;
}
now = READ_ONCE(jiffies);
then = q->dbell_jiffies;
dif = now - then;
if (dif > q->dbell_deadline) {
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
q->dbell_jiffies = now;
}
HARD_TX_UNLOCK(netdev, netdev_txq);
return true;
}
bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
{
unsigned long now, then, dif;
/* no lock, called from rx napi or txrx napi, nothing else can fill */
if (q->tail_idx == q->head_idx)
return false;
now = READ_ONCE(jiffies);
then = q->dbell_jiffies;
dif = now - then;
if (dif > q->dbell_deadline) {
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
q->dbell_jiffies = now;
dif = 2 * q->dbell_deadline;
if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
q->dbell_deadline = dif;
}
return true;
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
return netdev_get_tx_queue(q->lif->netdev, q->index);
@ -424,6 +485,12 @@ void ionic_rx_fill(struct ionic_queue *q)
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
@ -511,6 +578,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
return work_done;
}
@ -544,23 +614,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
return work_done;
}
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_qcq *rxqcq = napi_to_qcq(napi);
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
struct ionic_qcq *txqcq;
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
@ -572,7 +648,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(qcq, 0);
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
@ -583,6 +659,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
resched = true;
if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
resched = true;
if (resched)
mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
return rx_work_done;
}
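The doorbell-poke helpers in this file all lean on the same wrap-safe jiffies arithmetic: subtracting two unsigned counters gives the elapsed ticks even across a wraparound, so the result can be compared against a deadline directly. A small standalone illustration (plain C, not driver code; the numbers are made up):

```c
#include <stdio.h>

/* Simulated "jiffies" counter: unsigned, allowed to wrap. */
static int deadline_passed(unsigned long now, unsigned long then,
			   unsigned long deadline)
{
	/* Unsigned subtraction yields the elapsed ticks even if `now` wrapped. */
	return (now - then) > deadline;
}

int main(void)
{
	unsigned long deadline = 5;			/* e.g. HZ / 200 on a HZ=1000 kernel */
	unsigned long then = (unsigned long)-3;		/* doorbell rung just before the counter wrapped */
	unsigned long now = 4;				/* counter has since wrapped */

	/* 7 ticks elapsed across the wrap, which exceeds the 5-tick deadline. */
	printf("elapsed=%lu passed=%d\n", now - then,
	       deadline_passed(now, then, deadline));
	return 0;
}
```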


@ -1034,7 +1034,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
packet->dma_range = kcalloc(page_count,
sizeof(*packet->dma_range),
GFP_KERNEL);
GFP_ATOMIC);
if (!packet->dma_range)
return -ENOMEM;
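The netvsc one-liner applies the usual rule that GFP_KERNEL may sleep and therefore cannot be used from atomic context, such as this transmit path; GFP_ATOMIC never sleeps, at the cost of being more likely to fail. A generic, hedged sketch of the pattern (illustrative structure and function names, not the netvsc code):

```c
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_dev {
	spinlock_t lock;
	u32 *ranges;
};

/* Called from a context that cannot sleep (e.g. under a spinlock or in a
 * transmit path), so the allocation must be GFP_ATOMIC, not GFP_KERNEL.
 */
static int demo_map(struct demo_dev *dev, unsigned int page_count)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dev->lock, flags);
	dev->ranges = kcalloc(page_count, sizeof(*dev->ranges), GFP_ATOMIC);
	if (!dev->ranges)
		ret = -ENOMEM;
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
```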


@ -261,6 +261,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x01803301),
.name = "Meson G12A Internal PHY",

View File

@ -1812,10 +1812,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
if (ret) {
phy_device_free(phy_dev);
phy_device_free(phy_dev);
if (ret)
return ret;
}
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)


@ -57,9 +57,7 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
return usbnet_read_cmd(dev, req,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE,
return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
val, index, NULL, 0);
}


@ -573,6 +573,14 @@ struct mlx5_debugfs_entries {
struct dentry *lag_debugfs;
};
enum mlx5_func_type {
MLX5_PF,
MLX5_VF,
MLX5_SF,
MLX5_HOST_PF,
MLX5_FUNC_TYPE_NUM,
};
struct mlx5_ft_pool;
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
@ -583,11 +591,10 @@ struct mlx5_priv {
struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
struct xarray page_root_xa;
u32 fw_pages;
atomic_t reg_pages;
struct list_head free_list;
u32 vfs_pages;
u32 host_pf_pages;
u32 fw_pages;
u32 page_counters[MLX5_FUNC_TYPE_NUM];
u32 fw_pages_alloc_failed;
u32 give_pages_dropped;
u32 reclaim_pages_discard;


@ -18,6 +18,7 @@
#ifndef _UAPI_LINUX_IP_H
#define _UAPI_LINUX_IP_H
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/byteorder.h>
#define IPTOS_TOS_MASK 0x1E


@ -4,6 +4,7 @@
#include <linux/libc-compat.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/in6.h>
#include <asm/byteorder.h>


@ -165,6 +165,46 @@ static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb)
* leaving this function.
*/
ecu = j1939_ecu_get_by_name_locked(priv, name);
if (ecu && ecu->addr == skcb->addr.sa) {
/* The ISO 11783-5 standard, in "4.5.2 - Address claim
* requirements", states:
* d) No CF shall begin, or resume, transmission on the
* network until 250 ms after it has successfully claimed
* an address except when responding to a request for
* address-claimed.
*
* But "Figure 6" and "Figure 7" in "4.5.4.2 - Address-claim
* prioritization" show that the CF begins the transmission
* after 250 ms from the first AC (address-claimed) message
* even if it sends another AC message during that time window
* to resolve the address contention with another CF.
*
* As stated in "4.4.2.3 - Address-claimed message":
* In order to successfully claim an address, the CF sending
* an address claimed message shall not receive a contending
* claim from another CF for at least 250 ms.
*
* As stated in "4.4.3.2 - NAME management (NM) message":
* 1) A commanding CF can
* d) request that a CF with a specified NAME transmit
* the address-claimed message with its current NAME.
* 2) A target CF shall
* d) send an address-claimed message in response to a
* request for a matching NAME
*
* Taking the above arguments into account, the 250 ms wait is
* requested only during network initialization.
*
* Do not restart the timer on AC message if both the NAME and
* the address match and so if the address has already been
* claimed (timer has expired) or the AC message has been sent
* to resolve the contention with another CF (timer is still
* running).
*/
goto out_ecu_put;
}
if (!ecu && j1939_address_is_unicast(skcb->addr.sa))
ecu = j1939_ecu_create_locked(priv, name);


@@ -9979,7 +9979,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
goto err_xa_alloc;
devlink->netdevice_nb.notifier_call = devlink_netdevice_event;
ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
ret = register_netdevice_notifier(&devlink->netdevice_nb);
if (ret)
goto err_register_netdevice_notifier;
@@ -10171,8 +10171,7 @@ void devlink_free(struct devlink *devlink)
xa_destroy(&devlink->snapshot_ids);
xa_destroy(&devlink->ports);
WARN_ON_ONCE(unregister_netdevice_notifier_net(devlink_net(devlink),
&devlink->netdevice_nb));
WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
xa_erase(&devlinks, devlink->index);
@@ -10503,6 +10502,8 @@ static int devlink_netdevice_event(struct notifier_block *nb,
break;
case NETDEV_REGISTER:
case NETDEV_CHANGENAME:
if (devlink_net(devlink) != dev_net(netdev))
return NOTIFY_OK;
/* Set the netdev on top of previously set type. Note this
* event happens also during net namespace change so here
* we take into account netdev pointer appearing in this
@@ -10512,6 +10513,8 @@ static int devlink_netdevice_event(struct notifier_block *nb,
netdev);
break;
case NETDEV_UNREGISTER:
if (devlink_net(devlink) != dev_net(netdev))
return NOTIFY_OK;
/* Clear netdev pointer, but not the type. This event happens
* also during net namespace change so we need to clear
* pointer to netdev that is going to another net namespace.


@@ -269,7 +269,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
(n->nud_state == NUD_NOARP) ||
(tbl->is_multicast &&
tbl->is_multicast(n->primary_key)) ||
time_after(tref, n->updated))
!time_in_range(n->updated, tref, jiffies))
remove = true;
write_unlock(&n->lock);
@@ -289,7 +289,17 @@ static int neigh_forced_gc(struct neigh_table *tbl)
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
/* Use safe distance from the jiffies - LONG_MAX point while timer
* is running in DELAY/PROBE state but still show to user space
* large times in the past.
*/
unsigned long mint = jiffies - (LONG_MAX - 86400 * HZ);
neigh_hold(n);
if (!time_in_range(n->confirmed, mint, jiffies))
n->confirmed = mint;
if (time_before(n->used, n->confirmed))
n->used = n->confirmed;
if (unlikely(mod_timer(&n->timer, when))) {
printk("NEIGH: BUG, double timer add, state is %x\n",
n->nud_state);
@@ -1001,12 +1011,14 @@ static void neigh_periodic_work(struct work_struct *work)
goto next_elt;
}
if (time_before(n->used, n->confirmed))
if (time_before(n->used, n->confirmed) &&
time_is_before_eq_jiffies(n->confirmed))
n->used = n->confirmed;
if (refcount_read(&n->refcnt) == 1 &&
(state == NUD_FAILED ||
time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
!time_in_range_open(jiffies, n->used,
n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
neigh_mark_dead(n);
write_unlock(&n->lock);
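
The neighbour hunks above replace plain time_after() checks with time_in_range() and clamp n->confirmed/n->used to a sane window, so stamps in the future or absurdly far in the past no longer confuse the GC and timer paths. Below is a standalone sketch of that guard, assuming the wrap-safe comparison macros behave as in include/linux/jiffies.h; the variable names are illustrative.

#include <stdio.h>

/* Userspace stand-ins for the kernel's wrap-safe jiffies comparisons. */
#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)
#define time_before_eq(a, b)	time_after_eq(b, a)
#define time_in_range(t, lo, hi) \
	(time_after_eq(t, lo) && time_before_eq(t, hi))

int main(void)
{
	unsigned long jiffies = 1000000UL;	/* pretend current time */
	unsigned long mint = jiffies - 500;	/* oldest acceptable stamp */
	unsigned long confirmed = 10;		/* suspiciously old value */

	/* Only trust the stamp if it falls inside [mint, jiffies];
	 * otherwise clamp it, as the fix does for n->confirmed.
	 */
	if (!time_in_range(confirmed, mint, jiffies))
		confirmed = mint;

	printf("confirmed clamped to %lu\n", confirmed);
	return 0;
}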


@@ -1531,6 +1531,8 @@ set_sndbuf:
ret = -EINVAL;
break;
}
if ((u8)val == SOCK_TXREHASH_DEFAULT)
val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
/* Paired with READ_ONCE() in tcp_rtx_synack() */
WRITE_ONCE(sk->sk_txrehash, (u8)val);
break;
@@ -3451,7 +3453,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_pacing_rate = ~0UL;
WRITE_ONCE(sk->sk_pacing_shift, 10);
sk->sk_incoming_cpu = -1;
sk->sk_txrehash = SOCK_TXREHASH_DEFAULT;
sk_rx_queue_clear(sk);
/*


@@ -347,6 +347,7 @@ lookup_protocol:
sk->sk_destruct = inet_sock_destruct;
sk->sk_protocol = protocol;
sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
inet->uc_ttl = -1;
inet->mc_loop = 1;


@@ -1225,9 +1225,6 @@ int inet_csk_listen_start(struct sock *sk)
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
if (sk->sk_txrehash == SOCK_TXREHASH_DEFAULT)
sk->sk_txrehash = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
/* There is race window here: we announce ourselves listening,
* but this transition is still not validated by get_port().
* It is OK, because this socket enters to hash table only


@@ -222,6 +222,7 @@ lookup_protocol:
np->pmtudisc = IPV6_PMTUDISC_WANT;
np->repflow = net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_ESTABLISHED;
sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
/* Init the ipv4 part of the socket since we can have sockets
* using v6 API for ipv4.


@@ -998,8 +998,8 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
{
int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
struct mptcp_sock *msk;
struct socket *ssock;
struct sock *newsk;
int backlog = 1024;
int err;
@@ -1008,11 +1008,13 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
if (err)
return err;
msk = mptcp_sk(entry->lsk->sk);
if (!msk)
newsk = entry->lsk->sk;
if (!newsk)
return -EINVAL;
ssock = __mptcp_nmpc_socket(msk);
lock_sock(newsk);
ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
release_sock(newsk);
if (!ssock)
return -EINVAL;


@@ -2897,6 +2897,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
bool do_cancel_work = false;
int subflows_alive = 0;
sk->sk_shutdown = SHUTDOWN_MASK;
@@ -2922,6 +2923,8 @@ cleanup:
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bool slow = lock_sock_fast_nested(ssk);
subflows_alive += ssk->sk_state != TCP_CLOSE;
/* since the close timeout takes precedence on the fail one,
* cancel the latter
*/
@@ -2937,6 +2940,12 @@ cleanup:
}
sock_orphan(sk);
/* all the subflows are closed, only timeout can change the msk
* state, let's not keep resources busy for no reasons
*/
if (subflows_alive == 0)
inet_sk_state_store(sk, TCP_CLOSE);
sock_hold(sk);
pr_debug("msk=%p state=%d", sk, sk->sk_state);
if (msk->token)


@@ -760,14 +760,21 @@ static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname,
static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = (struct sock *)msk;
struct socket *sock;
int ret = -EINVAL;
/* Limit to first subflow, before the connection establishment */
lock_sock(sk);
sock = __mptcp_nmpc_socket(msk);
if (!sock)
return -EINVAL;
goto unlock;
return tcp_setsockopt(sock->sk, level, optname, optval, optlen);
ret = tcp_setsockopt(sock->sk, level, optname, optval, optlen);
unlock:
release_sock(sk);
return ret;
}
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,


@@ -1399,6 +1399,7 @@ void __mptcp_error_report(struct sock *sk)
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
int err = sock_error(ssk);
int ssk_state;
if (!err)
continue;
@@ -1409,7 +1410,14 @@ void __mptcp_error_report(struct sock *sk)
if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
continue;
inet_sk_state_store(sk, inet_sk_state_load(ssk));
/* We need to propagate only transition to CLOSE state.
* Orphaned socket will see such state change via
* subflow_sched_work_if_closed() and that path will properly
* destroy the msk as needed.
*/
ssk_state = inet_sk_state_load(ssk);
if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
inet_sk_state_store(sk, ssk_state);
sk->sk_err = -err;
/* This barrier is coupled with smp_rmb() in mptcp_poll() */
@@ -1679,7 +1687,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
if (err)
return err;
lock_sock(sf->sk);
lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
/* the newly created socket has to be in the same cgroup as its parent */
mptcp_attach_cgroup(sk, sf->sk);


@@ -104,9 +104,9 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
spin_lock_irqsave(&q->lock, flags);
head = &q->zcookie_head;
if (!list_empty(head)) {
info = list_entry(head, struct rds_msg_zcopy_info,
rs_zcookie_next);
if (info && rds_zcookie_add(info, cookie)) {
info = list_first_entry(head, struct rds_msg_zcopy_info,
rs_zcookie_next);
if (rds_zcookie_add(info, cookie)) {
spin_unlock_irqrestore(&q->lock, flags);
kfree(rds_info_from_znotifier(znotif));
/* caller invokes rds_wake_sk_sleep() */
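
The rds hunk above swaps list_entry() applied to the list head itself, which computes a container around the head node rather than a real entry, for list_first_entry(), which resolves head->next. A compact userspace illustration of the difference, with a stripped-down intrusive list; struct item and its field names are made up for the example.

#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive list, enough to show why list_entry(head, ...) on the
 * list head is wrong while list_first_entry(head, ...) is not: the former
 * maps the head itself onto a containing struct that does not exist, the
 * latter maps head->next, i.e. the first real element.
 */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member)		container_of(ptr, type, member)
#define list_first_entry(head, type, member)	list_entry((head)->next, type, member)

struct item {
	int cookie;
	struct list_head node;
};

int main(void)
{
	struct list_head head;
	struct item a = { .cookie = 42 };

	head.next = &a.node;	/* one element on the list */
	head.prev = &a.node;
	a.node.next = &head;
	a.node.prev = &head;

	struct item *first = list_first_entry(&head, struct item, node);
	printf("first cookie: %d\n", first->cookie);	/* prints 42 */

	/* list_entry(&head, ...) would instead point into the stack slot
	 * holding 'head', not into 'a' - exactly the bug being fixed.
	 */
	return 0;
}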


@@ -433,7 +433,7 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
while (m) {
unsigned int prio = ffz(~m);
if (WARN_ON_ONCE(prio > ARRAY_SIZE(p->inner.clprio)))
if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
break;
m &= ~(1 << prio);
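
The htb change above tightens the guard from '>' to '>=': clprio[] has ARRAY_SIZE(p->inner.clprio) slots, so the last valid index is ARRAY_SIZE - 1 and an index equal to the array size must already be rejected. A tiny userspace sketch of the corrected bound check; the array and variable names are illustrative.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int clprio[8];		/* stand-in for p->inner.clprio */
	unsigned int prio = 8;	/* one past the last valid index (7) */

	/* 'prio > ARRAY_SIZE(clprio)' would wrongly let prio == 8 through;
	 * '>=' rejects it before clprio[prio] is touched.
	 */
	if (prio >= ARRAY_SIZE(clprio))
		printf("rejected out-of-range prio %u\n", prio);
	else
		clprio[prio] = 1;
	return 0;
}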


@@ -5,6 +5,7 @@
* Based on code and translator idea by: Florian Westphal <fw@strlen.de>
*/
#include <linux/compat.h>
#include <linux/nospec.h>
#include <linux/xfrm.h>
#include <net/xfrm.h>
@@ -302,7 +303,7 @@ static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
nla_for_each_attr(nla, attrs, len, remaining) {
int err;
switch (type) {
switch (nlh_src->nlmsg_type) {
case XFRM_MSG_NEWSPDINFO:
err = xfrm_nla_cpy(dst, nla, nla_len(nla));
break;
@@ -437,6 +438,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
NL_SET_ERR_MSG(extack, "Bad attribute");
return -EOPNOTSUPP;
}
type = array_index_nospec(type, XFRMA_MAX + 1);
if (nla_len(nla) < compat_policy[type].len) {
NL_SET_ERR_MSG(extack, "Attribute bad length");
return -EOPNOTSUPP;
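
In the xfrm_compat hunk above the attribute type is first bounds-checked and then passed through array_index_nospec() before it indexes compat_policy[], so a mis-speculated out-of-bounds type cannot become a Spectre v1 gadget. A hedged userspace sketch of the same check-then-clamp pattern follows; the real helper lives in <linux/nospec.h> and is branchless, the simplified stand-in and table name below are only for illustration.

#include <stdio.h>

#define TABLE_MAX 16

/* Simplified stand-in for the kernel's array_index_nospec(): force the
 * index into [0, size) so that even under speculation it cannot reach
 * past 'size'. (The real helper does this branchlessly with a mask.)
 */
static inline unsigned long array_index_nospec(unsigned long index,
					       unsigned long size)
{
	return index < size ? index : 0;
}

static const int table[TABLE_MAX + 1] = { [3] = 99 };

int main(void)
{
	unsigned long type = 3;	/* attacker-influenced attribute type */

	if (type > TABLE_MAX) {
		fprintf(stderr, "bad attribute\n");
		return 1;
	}
	/* Architectural bounds check passed; now fence off speculation. */
	type = array_index_nospec(type, TABLE_MAX + 1);
	printf("table[%lu] = %d\n", type, table[type]);
	return 0;
}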


@@ -279,8 +279,7 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
goto out;
if (x->props.flags & XFRM_STATE_DECAP_DSCP)
ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
ipipv6_hdr(skb));
ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
if (!(x->props.flags & XFRM_STATE_NOECN))
ipip6_ecn_decapsulate(skb);


@@ -310,6 +310,52 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
skb->mark = 0;
}
static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type, unsigned short family)
{
struct sec_path *sp;
sp = skb_sec_path(skb);
if (sp && (sp->len || sp->olen) &&
!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
goto discard;
XFRM_SPI_SKB_CB(skb)->family = family;
if (family == AF_INET) {
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
} else {
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
}
return xfrm_input(skb, nexthdr, spi, encap_type);
discard:
kfree_skb(skb);
return 0;
}
static int xfrmi4_rcv(struct sk_buff *skb)
{
return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
}
static int xfrmi6_rcv(struct sk_buff *skb)
{
return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
0, 0, AF_INET6);
}
static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
}
static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
}
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
const struct xfrm_mode *inner_mode;
@@ -945,8 +991,8 @@ static struct pernet_operations xfrmi_net_ops = {
};
static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
.handler = xfrm6_rcv,
.input_handler = xfrm_input,
.handler = xfrmi6_rcv,
.input_handler = xfrmi6_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 10,
@@ -996,8 +1042,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
#endif
static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
.handler = xfrm4_rcv,
.input_handler = xfrm_input,
.handler = xfrmi4_rcv,
.input_handler = xfrmi4_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 10,


@@ -336,7 +336,7 @@ static void xfrm_policy_timer(struct timer_list *t)
}
if (xp->lft.hard_use_expires_seconds) {
time64_t tmo = xp->lft.hard_use_expires_seconds +
(xp->curlft.use_time ? : xp->curlft.add_time) - now;
(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
@@ -354,7 +354,7 @@ static void xfrm_policy_timer(struct timer_list *t)
}
if (xp->lft.soft_use_expires_seconds) {
time64_t tmo = xp->lft.soft_use_expires_seconds +
(xp->curlft.use_time ? : xp->curlft.add_time) - now;
(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
if (tmo <= 0) {
warn = 1;
tmo = XFRM_KM_TIMEOUT;
@@ -3661,7 +3661,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
return 1;
}
pol->curlft.use_time = ktime_get_real_seconds();
/* This lockless write can happen from different cpus. */
WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
pols[0] = pol;
npols++;
@@ -3676,7 +3677,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
xfrm_pol_put(pols[0]);
return 0;
}
pols[1]->curlft.use_time = ktime_get_real_seconds();
/* This write can happen from different cpus. */
WRITE_ONCE(pols[1]->curlft.use_time,
ktime_get_real_seconds());
npols++;
}
}
@@ -3742,6 +3745,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
goto reject;
}
if (if_id)
secpath_reset(skb);
xfrm_pols_put(pols, npols);
return 1;
}
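
The xfrm hunks above and below annotate curlft.use_time with WRITE_ONCE()/READ_ONCE() because, as the added comment notes, the per-packet policy check may now write it from several CPUs without a lock. A rough userspace approximation of those annotations; the kernel macros additionally enforce access sizes and help tools like KCSAN, and the struct and field names here are illustrative.

#include <stdio.h>
#include <time.h>

/* Rough stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(): force a
 * single, non-torn access through a volatile-qualified pointer.
 */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct lifetime_cur {
	long long use_time;	/* illustrative: mirrors curlft.use_time */
	long long add_time;
};

int main(void)
{
	struct lifetime_cur cur = { .add_time = time(NULL) };

	/* Writer path (e.g. per-packet policy check on any CPU). */
	WRITE_ONCE(cur.use_time, (long long)time(NULL));

	/* Reader path (e.g. the expiry timer): fall back to add_time
	 * when use_time has never been set, as the timer code does.
	 */
	long long base = READ_ONCE(cur.use_time) ?: cur.add_time;
	printf("expiry base: %lld\n", base);
	return 0;
}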


@@ -577,7 +577,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
if (x->km.state == XFRM_STATE_EXPIRED)
goto expired;
if (x->lft.hard_add_expires_seconds) {
long tmo = x->lft.hard_add_expires_seconds +
time64_t tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
if (x->xflags & XFRM_SOFT_EXPIRE) {
@@ -594,8 +594,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
next = tmo;
}
if (x->lft.hard_use_expires_seconds) {
long tmo = x->lft.hard_use_expires_seconds +
(x->curlft.use_time ? : now) - now;
time64_t tmo = x->lft.hard_use_expires_seconds +
(READ_ONCE(x->curlft.use_time) ? : now) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
@@ -604,7 +604,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
if (x->km.dying)
goto resched;
if (x->lft.soft_add_expires_seconds) {
long tmo = x->lft.soft_add_expires_seconds +
time64_t tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
warn = 1;
@@ -616,8 +616,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
}
}
if (x->lft.soft_use_expires_seconds) {
long tmo = x->lft.soft_use_expires_seconds +
(x->curlft.use_time ? : now) - now;
time64_t tmo = x->lft.soft_use_expires_seconds +
(READ_ONCE(x->curlft.use_time) ? : now) - now;
if (tmo <= 0)
warn = 1;
else if (tmo < next)
@@ -1906,7 +1906,7 @@ out:
hrtimer_start(&x1->mtimer, ktime_set(1, 0),
HRTIMER_MODE_REL_SOFT);
if (x1->curlft.use_time)
if (READ_ONCE(x1->curlft.use_time))
xfrm_state_check_expire(x1);
if (x->props.smark.m || x->props.smark.v || x->if_id) {
@@ -1940,8 +1940,8 @@ int xfrm_state_check_expire(struct xfrm_state *x)
{
xfrm_dev_state_update_curlft(x);
if (!x->curlft.use_time)
x->curlft.use_time = ktime_get_real_seconds();
if (!READ_ONCE(x->curlft.use_time))
WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {


@@ -246,7 +246,7 @@ test_vlan_ingress_modify()
bridge vlan add dev $swp2 vid 300
tc filter add dev $swp1 ingress chain $(IS1 2) pref 3 \
protocol 802.1Q flower skip_sw vlan_id 200 \
protocol 802.1Q flower skip_sw vlan_id 200 src_mac $h1_mac \
action vlan modify id 300 \
action goto chain $(IS2 0 0)


@@ -914,14 +914,14 @@ sysctl_set()
local value=$1; shift
SYSCTL_ORIG[$key]=$(sysctl -n $key)
sysctl -qw $key=$value
sysctl -qw $key="$value"
}
sysctl_restore()
{
local key=$1; shift
sysctl -qw $key=${SYSCTL_ORIG["$key"]}
sysctl -qw $key="${SYSCTL_ORIG[$key]}"
}
forwarding_enable()


@@ -498,6 +498,12 @@ kill_events_pids()
kill_wait $evts_ns2_pid
}
kill_tests_wait()
{
kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1)
wait
}
pm_nl_set_limits()
{
local ns=$1
@@ -1694,6 +1700,7 @@ chk_subflow_nr()
local subflow_nr=$3
local cnt1
local cnt2
local dump_stats
if [ -n "${need_title}" ]; then
printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
@@ -1711,7 +1718,12 @@ chk_subflow_nr()
echo "[ ok ]"
fi
[ "${dump_stats}" = 1 ] && ( ss -N $ns1 -tOni ; ss -N $ns1 -tOni | grep token; ip -n $ns1 mptcp endpoint )
if [ "${dump_stats}" = 1 ]; then
ss -N $ns1 -tOni
ss -N $ns1 -tOni | grep token
ip -n $ns1 mptcp endpoint
dump_stats
fi
}
chk_link_usage()
@@ -3049,7 +3061,7 @@ endpoint_tests()
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow &
run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow 2>/dev/null &
wait_mpj $ns1
pm_nl_check_endpoint 1 "creation" \
@@ -3062,14 +3074,14 @@ endpoint_tests()
pm_nl_add_endpoint $ns2 10.0.2.2 flags signal
pm_nl_check_endpoint 0 "modif is allowed" \
$ns2 10.0.2.2 id 1 flags signal
wait
kill_tests_wait
fi
if reset "delete and re-add"; then
pm_nl_set_limits $ns1 1 1
pm_nl_set_limits $ns2 1 1
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
run_tests $ns1 $ns2 10.0.1.1 4 0 0 slow &
run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null &
wait_mpj $ns2
pm_nl_del_endpoint $ns2 2 10.0.2.2
@@ -3079,7 +3091,7 @@ endpoint_tests()
pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
wait_mpj $ns2
chk_subflow_nr "" "after re-add" 2
wait
kill_tests_wait
fi
}


@@ -293,19 +293,11 @@ setup-vm() {
elif [[ -n $vtype && $vtype == "vnifilterg" ]]; then
# Add per vni group config with 'bridge vni' api
if [ -n "$group" ]; then
if [ "$family" == "v4" ]; then
if [ $mcast -eq 1 ]; then
bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
else
bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
fi
else
if [ $mcast -eq 1 ]; then
bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group6 $group
else
bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote6 $group
fi
fi
if [ $mcast -eq 1 ]; then
bridge -netns hv-$hvid vni add dev $vxlandev vni $tid group $group
else
bridge -netns hv-$hvid vni add dev $vxlandev vni $tid remote $group
fi
fi
fi
done